author    Wang Pengcheng <wangpengcheng.pp@bytedance.com>  2024-04-16 21:27:31 +0800
committer Wang Pengcheng <wangpengcheng.pp@bytedance.com>  2024-04-16 21:27:31 +0800
commit    36640769547bedf26ddf149132c1b75f9e088a21 (patch)
tree      ae7d210d8d7593f7bb672006e4a31f192a02b9c5 /llvm
parent    d72e50aae48ffed5fb6c1a9ad6bfc47c5ca93230 (diff)
parent    e7fb49c24e4be4780ee4df9829980c5e8ddd511e (diff)
Created using spr 1.3.6-beta.1
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/include/llvm/CodeGen/GlobalISel/Utils.h | 4
-rw-r--r--  llvm/lib/Analysis/Lint.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/GlobalISel/Utils.cpp | 44
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 16
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 10
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringBase.cpp | 12
-rw-r--r--  llvm/lib/IR/AutoUpgrade.cpp | 9
-rw-r--r--  llvm/lib/IR/Verifier.cpp | 5
-rw-r--r--  llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp | 37
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNCreateVOPD.cpp | 1
-rw-r--r--  llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 7
-rw-r--r--  llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp | 7
-rw-r--r--  llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp | 24
-rw-r--r--  llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp | 39
-rw-r--r--  llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp | 57
-rw-r--r--  llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 40
-rw-r--r--  llvm/lib/Target/RISCV/RISCVFeatures.td | 6
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 244
-rw-r--r--  llvm/lib/Target/RISCV/RISCVISelLowering.h | 59
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 66
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 92
-rw-r--r--  llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp | 89
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp | 4
-rw-r--r--  llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp | 139
-rw-r--r--  llvm/lib/Target/X86/GISel/X86RegisterBankInfo.h | 16
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Utils/Local.cpp | 18
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/and-sink.ll | 9
-rw-r--r--  llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll | 122
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16.ll | 22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/build_vector.ll | 97
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-constant-i16.ll | 22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/load-constant-i8.ll | 90
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sgpr-spill-overlap-wwm-reserve.mir | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/vopd-combine.mir | 32
-rw-r--r--  llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll | 18
-rw-r--r--  llvm/test/CodeGen/RISCV/prefer-w-inst.ll | 105
-rw-r--r--  llvm/test/CodeGen/RISCV/prefer-w-inst.mir | 262
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/calling-conv.ll | 203
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll | 5
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll | 19
-rw-r--r--  llvm/test/CodeGen/RISCV/strip-w-suffix.ll | 74
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/fconstant.ll | 13
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/regbankselect-sse-intrinsics.ll | 153
-rw-r--r--  llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll | 29
-rw-r--r--  llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll | 42
-rw-r--r--  llvm/test/CodeGen/X86/dpbusd.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/dpbusd_i4.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll | 1300
-rw-r--r--  llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll | 602
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll | 31
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll | 44
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll | 15
-rw-r--r--  llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll | 28
-rw-r--r--  llvm/test/MC/LoongArch/Relocations/relax-addsub.s | 2
-rw-r--r--  llvm/test/MC/LoongArch/Relocations/relax-align.s | 14
-rw-r--r--  llvm/test/Other/lint.ll | 7
-rw-r--r--  llvm/test/Transforms/InstCombine/minmax-intrinsics.ll | 89
-rw-r--r--  llvm/test/Transforms/JumpThreading/pr79175.ll | 8
-rw-r--r--  llvm/test/Transforms/JumpThreading/select.ll | 50
-rw-r--r--  llvm/test/Transforms/JumpThreading/thread-prob-7.ll | 8
-rw-r--r--  llvm/test/Transforms/JumpThreading/uncond-no-phi.ll | 123
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/thread-uncond-bb.ll | 62
-rw-r--r--  llvm/test/Transforms/SCCP/pr50901.ll | 41
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/trunc-store-value-ty-not-power-of-2.ll | 33
-rw-r--r--  llvm/test/Verifier/variadic.ll | 8
-rw-r--r--  llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp | 7
-rw-r--r--  llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp | 27
-rw-r--r--  llvm/unittests/Transforms/Utils/LocalTest.cpp | 21
-rw-r--r--  llvm/utils/gn/secondary/lldb/test/BUILD.gn | 1
73 files changed, 3270 insertions, 1682 deletions
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index 807cec3..c4174ce 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -555,5 +555,9 @@ void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
/// debug users of \p MI by writing the effect of \p MI in a DIExpression.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI);
+/// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
+/// having only floating-point operands.
+bool isPreISelGenericFloatingPointOpcode(unsigned Opc);
+
} // End namespace llvm.
#endif
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 0694c29..1ab856a 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -350,10 +350,7 @@ void Lint::visitCallBase(CallBase &I) {
}
case Intrinsic::vastart:
- Check(I.getParent()->getParent()->isVarArg(),
- "Undefined behavior: va_start called in a non-varargs function",
- &I);
-
+ // vastart in a non-varargs function is rejected by the verifier.
visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
std::nullopt, nullptr, MemRef::Read | MemRef::Write);
break;
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index c3bc320..ae43e9c 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1665,3 +1665,47 @@ void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
}
}
}
+
+bool llvm::isPreISelGenericFloatingPointOpcode(unsigned Opc) {
+ switch (Opc) {
+ case TargetOpcode::G_FABS:
+ case TargetOpcode::G_FADD:
+ case TargetOpcode::G_FCANONICALIZE:
+ case TargetOpcode::G_FCEIL:
+ case TargetOpcode::G_FCONSTANT:
+ case TargetOpcode::G_FCOPYSIGN:
+ case TargetOpcode::G_FCOS:
+ case TargetOpcode::G_FDIV:
+ case TargetOpcode::G_FEXP2:
+ case TargetOpcode::G_FEXP:
+ case TargetOpcode::G_FFLOOR:
+ case TargetOpcode::G_FLOG10:
+ case TargetOpcode::G_FLOG2:
+ case TargetOpcode::G_FLOG:
+ case TargetOpcode::G_FMA:
+ case TargetOpcode::G_FMAD:
+ case TargetOpcode::G_FMAXIMUM:
+ case TargetOpcode::G_FMAXNUM:
+ case TargetOpcode::G_FMAXNUM_IEEE:
+ case TargetOpcode::G_FMINIMUM:
+ case TargetOpcode::G_FMINNUM:
+ case TargetOpcode::G_FMINNUM_IEEE:
+ case TargetOpcode::G_FMUL:
+ case TargetOpcode::G_FNEARBYINT:
+ case TargetOpcode::G_FNEG:
+ case TargetOpcode::G_FPEXT:
+ case TargetOpcode::G_FPOW:
+ case TargetOpcode::G_FPTRUNC:
+ case TargetOpcode::G_FREM:
+ case TargetOpcode::G_FRINT:
+ case TargetOpcode::G_FSIN:
+ case TargetOpcode::G_FSQRT:
+ case TargetOpcode::G_FSUB:
+ case TargetOpcode::G_INTRINSIC_ROUND:
+ case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
+ case TargetOpcode::G_INTRINSIC_TRUNC:
+ return true;
+ default:
+ return false;
+ }
+}
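
The new shared helper replaces per-target copies of the same switch. As a minimal sketch (hypothetical function and bank names, not part of this commit) of the call pattern the Mips, PowerPC, and RISC-V changes below adopt:

    // Sketch: route an instruction to the FP bank when its opcode is a
    // pre-isel generic floating-point opcode. Assumes the usual GlobalISel
    // headers (MachineInstr.h, RegisterBank.h, GlobalISel/Utils.h) and
    // target-provided FPRBank/GPRBank register banks.
    static const RegisterBank &selectBank(const MachineInstr &MI,
                                          const RegisterBank &FPRBank,
                                          const RegisterBank &GPRBank) {
      if (llvm::isPreISelGenericFloatingPointOpcode(MI.getOpcode()))
        return FPRBank;
      return GPRBank;
    }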
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 0fa0bf2..c36b1cc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -24467,6 +24467,22 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {
if (!LegalOperations || TLI.isOperationLegal(ISD::SPLAT_VECTOR, NVT))
return DAG.getSplatVector(NVT, DL, V.getOperand(0));
+ // extract_subvector(insert_subvector(x,y,c1),c2)
+ // --> extract_subvector(y,c2-c1)
+ // iff we're just extracting from the inserted subvector.
+ if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
+ SDValue InsSub = V.getOperand(1);
+ EVT InsSubVT = InsSub.getValueType();
+ unsigned NumInsElts = InsSubVT.getVectorMinNumElements();
+ unsigned InsIdx = V.getConstantOperandVal(2);
+ unsigned NumSubElts = NVT.getVectorMinNumElements();
+ if (InsIdx <= ExtIdx && (ExtIdx + NumSubElts) <= (InsIdx + NumInsElts) &&
+ TLI.isExtractSubvectorCheap(NVT, InsSubVT, ExtIdx - InsIdx) &&
+ InsSubVT.isFixedLengthVector() && NVT.isFixedLengthVector())
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NVT, InsSub,
+ DAG.getVectorIdxConstant(ExtIdx - InsIdx, DL));
+ }
+
// Try to move vector bitcast after extract_subv by scaling extraction index:
// extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
if (V.getOpcode() == ISD::BITCAST &&
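
A concrete instance of the new fold, in the comment's notation (illustrative types): with X: v8i32 and Y: v4i32 inserted at index 4 (elements 4-7), extract_subvector (insert_subvector X, Y, 4), 5 with result type v2i32 reads elements 5-6, which lie entirely inside Y, so it simplifies to extract_subvector Y, 1, satisfying the InsIdx <= ExtIdx and ExtIdx + NumSubElts <= InsIdx + NumInsElts checks above.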
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 57a3f6a..7a9cfdf 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1159,8 +1159,14 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
}
SDValue Unrolled = DAG.UnrollVectorOp(Node);
- for (unsigned I = 0, E = Unrolled->getNumValues(); I != E; ++I)
- Results.push_back(Unrolled.getValue(I));
+ if (Node->getNumValues() == 1) {
+ Results.push_back(Unrolled);
+ } else {
+ assert(Node->getNumValues() == Unrolled->getNumValues() &&
+ "VectorLegalizer Expand returned wrong number of results!");
+ for (unsigned I = 0, E = Unrolled->getNumValues(); I != E; ++I)
+ Results.push_back(Unrolled.getValue(I));
+ }
}
SDValue VectorLegalizer::ExpandSELECT(SDNode *Node) {
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index f64ded4..6e7b67d 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1809,8 +1809,16 @@ void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
else if (attr.hasRetAttr(Attribute::ZExt))
Flags.setZExt();
- for (unsigned i = 0; i < NumParts; ++i)
- Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
+ for (unsigned i = 0; i < NumParts; ++i) {
+ ISD::ArgFlagsTy OutFlags = Flags;
+ if (NumParts > 1 && i == 0)
+ OutFlags.setSplit();
+ else if (i == NumParts - 1 && i != 0)
+ OutFlags.setSplitEnd();
+
+ Outs.push_back(
+ ISD::OutputArg(OutFlags, PartVT, VT, /*isfixed=*/true, 0, 0));
+ }
}
}
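
For example (assuming a 64-bit target where getNumRegistersForCallingConv splits an i128 return value into two i64 parts): part 0 now carries the Split flag and part 1 the SplitEnd flag, so calling-convention code can recognize multi-part return values the same way it recognizes split arguments.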
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 2c480fb..634b2dd 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -5341,10 +5341,11 @@ MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {
Triple T(TT);
- // The only data layout upgrades needed for pre-GCN are setting the address
- // space of globals to 1.
- if (T.isAMDGPU() && !T.isAMDGCN() && !DL.contains("-G") &&
- !DL.starts_with("G")) {
+ // The only data layout upgrade needed for pre-GCN, SPIR, or SPIRV targets
+ // is setting the address space of globals to 1. This does not apply to
+ // SPIRV Logical.
+ if (((T.isAMDGPU() && !T.isAMDGCN()) ||
+ (T.isSPIR() || (T.isSPIRV() && !T.isSPIRVLogical()))) &&
+ !DL.contains("-G") && !DL.starts_with("G")) {
return DL.empty() ? std::string("G1") : (DL + "-G1").str();
}
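
Illustrative inputs for a spir64 or non-Logical spirv triple: an empty datalayout string becomes "G1", "e-i64:64" becomes "e-i64:64-G1", and a string that already contains "-G" (or starts with "G") is left unchanged. The DataLayoutUpgradeTest.cpp change in the diffstat exercises this.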
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 516d4a0..4cd61e6 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5798,6 +5798,11 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
break;
}
+ case Intrinsic::vastart: {
+ Check(Call.getFunction()->isVarArg(),
+ "va_start called in a non-varargs function");
+ break;
+ }
case Intrinsic::vector_reduce_and:
case Intrinsic::vector_reduce_or:
case Intrinsic::vector_reduce_xor:
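
In effect, a call to @llvm.va_start inside a non-variadic function now fails the verifier ("va_start called in a non-varargs function") rather than merely being flagged by the Lint pass, which is why the Lint check above could be dropped; the new llvm/test/Verifier/variadic.ll and the llvm/test/Other/lint.ll update in the diffstat cover both sides.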
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index d39de77..d5c4ce1 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -424,43 +424,6 @@ void AArch64RegisterBankInfo::applyMappingImpl(
}
}
-/// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
-/// having only floating-point operands.
-static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
- switch (Opc) {
- case TargetOpcode::G_FADD:
- case TargetOpcode::G_FSUB:
- case TargetOpcode::G_FMUL:
- case TargetOpcode::G_FMA:
- case TargetOpcode::G_FDIV:
- case TargetOpcode::G_FCONSTANT:
- case TargetOpcode::G_FPEXT:
- case TargetOpcode::G_FPTRUNC:
- case TargetOpcode::G_FCEIL:
- case TargetOpcode::G_FFLOOR:
- case TargetOpcode::G_FNEARBYINT:
- case TargetOpcode::G_FNEG:
- case TargetOpcode::G_FCOS:
- case TargetOpcode::G_FSIN:
- case TargetOpcode::G_FLOG10:
- case TargetOpcode::G_FLOG:
- case TargetOpcode::G_FLOG2:
- case TargetOpcode::G_FSQRT:
- case TargetOpcode::G_FABS:
- case TargetOpcode::G_FEXP:
- case TargetOpcode::G_FRINT:
- case TargetOpcode::G_INTRINSIC_TRUNC:
- case TargetOpcode::G_INTRINSIC_ROUND:
- case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
- case TargetOpcode::G_FMAXNUM:
- case TargetOpcode::G_FMINNUM:
- case TargetOpcode::G_FMAXIMUM:
- case TargetOpcode::G_FMINIMUM:
- return true;
- }
- return false;
-}
-
const RegisterBankInfo::InstructionMapping &
AArch64RegisterBankInfo::getSameKindOfOperandsMapping(
const MachineInstr &MI) const {
diff --git a/llvm/lib/Target/AMDGPU/GCNCreateVOPD.cpp b/llvm/lib/Target/AMDGPU/GCNCreateVOPD.cpp
index 05e10a9..1dda1b8 100644
--- a/llvm/lib/Target/AMDGPU/GCNCreateVOPD.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNCreateVOPD.cpp
@@ -101,6 +101,7 @@ public:
}
}
+ SII->fixImplicitOperands(*VOPDInst);
for (auto CompIdx : VOPD::COMPONENTS)
VOPDInst.copyImplicitOps(*MI[CompIdx]);
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 245731a..acb54fd 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -612,13 +612,6 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Reserve null register - it shall never be allocated
reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL64);
- // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
- // will result in bugs.
- if (isWave32) {
- Reserved.set(AMDGPU::VCC);
- Reserved.set(AMDGPU::VCC_HI);
- }
-
// Reserve SGPRs.
//
unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index de492f2..98f5014 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -226,11 +226,8 @@ bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(
MCFixup::create(0, Dummy, MCFixupKind(LoongArch::fixup_loongarch_align));
const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
if (MCSym == nullptr) {
- // Create a symbol and make the value of symbol is zero.
- MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
- Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
- Asm.registerSymbol(*Sym);
- MCSym = MCSymbolRefExpr::create(Sym, Ctx);
+ // Use section symbol directly.
+ MCSym = MCSymbolRefExpr::create(Sec->getBeginSymbol(), Ctx);
getSecToAlignSym()[Sec] = MCSym;
}
diff --git a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
index 6af1fd8..62b58cb 100644
--- a/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsRegisterBankInfo.cpp
@@ -104,26 +104,6 @@ MipsRegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
}
}
-// Instructions where all register operands are floating point.
-static bool isFloatingPointOpcode(unsigned Opc) {
- switch (Opc) {
- case TargetOpcode::G_FCONSTANT:
- case TargetOpcode::G_FADD:
- case TargetOpcode::G_FSUB:
- case TargetOpcode::G_FMUL:
- case TargetOpcode::G_FDIV:
- case TargetOpcode::G_FABS:
- case TargetOpcode::G_FSQRT:
- case TargetOpcode::G_FCEIL:
- case TargetOpcode::G_FFLOOR:
- case TargetOpcode::G_FPEXT:
- case TargetOpcode::G_FPTRUNC:
- return true;
- default:
- return false;
- }
-}
-
// Instructions where use operands are floating point registers.
// Def operands are general purpose.
static bool isFloatingPointOpcodeUse(unsigned Opc) {
@@ -133,7 +113,7 @@ static bool isFloatingPointOpcodeUse(unsigned Opc) {
case TargetOpcode::G_FCMP:
return true;
default:
- return isFloatingPointOpcode(Opc);
+ return isPreISelGenericFloatingPointOpcode(Opc);
}
}
@@ -145,7 +125,7 @@ static bool isFloatingPointOpcodeDef(unsigned Opc) {
case TargetOpcode::G_UITOFP:
return true;
default:
- return isFloatingPointOpcode(Opc);
+ return isPreISelGenericFloatingPointOpcode(Opc);
}
}
diff --git a/llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp b/llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp
index 6aeef145..125a49d 100644
--- a/llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp
+++ b/llvm/lib/Target/PowerPC/GISel/PPCRegisterBankInfo.cpp
@@ -13,6 +13,7 @@
#include "PPCRegisterBankInfo.h"
#include "PPCRegisterInfo.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
@@ -239,44 +240,6 @@ PPCRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
return getInstructionMapping(MappingID, Cost, OperandsMapping, NumOperands);
}
-/// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
-/// having only floating-point operands.
-/// FIXME: this is copied from target AArch64. Needs some code refactor here to
-/// put this function in GlobalISel/Utils.cpp.
-static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
- switch (Opc) {
- case TargetOpcode::G_FADD:
- case TargetOpcode::G_FSUB:
- case TargetOpcode::G_FMUL:
- case TargetOpcode::G_FMA:
- case TargetOpcode::G_FDIV:
- case TargetOpcode::G_FCONSTANT:
- case TargetOpcode::G_FPEXT:
- case TargetOpcode::G_FPTRUNC:
- case TargetOpcode::G_FCEIL:
- case TargetOpcode::G_FFLOOR:
- case TargetOpcode::G_FNEARBYINT:
- case TargetOpcode::G_FNEG:
- case TargetOpcode::G_FCOS:
- case TargetOpcode::G_FSIN:
- case TargetOpcode::G_FLOG10:
- case TargetOpcode::G_FLOG:
- case TargetOpcode::G_FLOG2:
- case TargetOpcode::G_FSQRT:
- case TargetOpcode::G_FABS:
- case TargetOpcode::G_FEXP:
- case TargetOpcode::G_FRINT:
- case TargetOpcode::G_INTRINSIC_TRUNC:
- case TargetOpcode::G_INTRINSIC_ROUND:
- case TargetOpcode::G_FMAXNUM:
- case TargetOpcode::G_FMINNUM:
- case TargetOpcode::G_FMAXIMUM:
- case TargetOpcode::G_FMINIMUM:
- return true;
- }
- return false;
-}
-
/// \returns true if a given intrinsic \p ID only uses and defines FPRs.
static bool isFPIntrinsic(unsigned ID) {
// TODO: Add more intrinsics.
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 45e19cd..c18892a 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -34,14 +34,15 @@ private:
// Whether this is assigning args for a return.
bool IsRet;
- // true if assignArg has been called for a mask argument, false otherwise.
- bool AssignedFirstMaskArg = false;
+ RVVArgDispatcher &RVVDispatcher;
public:
RISCVOutgoingValueAssigner(
- RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
+ RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
+ RVVArgDispatcher &RVVDispatcher)
: CallLowering::OutgoingValueAssigner(nullptr),
- RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
+ RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
+ RVVDispatcher(RVVDispatcher) {}
bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
@@ -51,16 +52,9 @@ public:
const DataLayout &DL = MF.getDataLayout();
const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
- ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
- FirstMaskArgument = ValNo;
- AssignedFirstMaskArg = true;
- }
-
if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
LocInfo, Flags, State, Info.IsFixed, IsRet, Info.Ty,
- *Subtarget.getTargetLowering(), FirstMaskArgument))
+ *Subtarget.getTargetLowering(), RVVDispatcher))
return true;
StackSize = State.getStackSize();
@@ -181,14 +175,15 @@ private:
// Whether this is assigning args from a return.
bool IsRet;
- // true if assignArg has been called for a mask argument, false otherwise.
- bool AssignedFirstMaskArg = false;
+ RVVArgDispatcher &RVVDispatcher;
public:
RISCVIncomingValueAssigner(
- RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet)
+ RISCVTargetLowering::RISCVCCAssignFn *RISCVAssignFn_, bool IsRet,
+ RVVArgDispatcher &RVVDispatcher)
: CallLowering::IncomingValueAssigner(nullptr),
- RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet) {}
+ RISCVAssignFn(RISCVAssignFn_), IsRet(IsRet),
+ RVVDispatcher(RVVDispatcher) {}
bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo,
@@ -201,16 +196,9 @@ public:
if (LocVT.isScalableVector())
MF.getInfo<RISCVMachineFunctionInfo>()->setIsVectorCall();
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions() && !AssignedFirstMaskArg &&
- ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) {
- FirstMaskArgument = ValNo;
- AssignedFirstMaskArg = true;
- }
-
if (RISCVAssignFn(DL, Subtarget.getTargetABI(), ValNo, ValVT, LocVT,
LocInfo, Flags, State, /*IsFixed=*/true, IsRet, Info.Ty,
- *Subtarget.getTargetLowering(), FirstMaskArgument))
+ *Subtarget.getTargetLowering(), RVVDispatcher))
return true;
StackSize = State.getStackSize();
@@ -420,9 +408,11 @@ bool RISCVCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 4> SplitRetInfos;
splitToValueTypes(OrigRetInfo, SplitRetInfos, DL, CC);
+ RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
+ ArrayRef(F.getReturnType())};
RISCVOutgoingValueAssigner Assigner(
CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
- /*IsRet=*/true);
+ /*IsRet=*/true, Dispatcher);
RISCVOutgoingValueHandler Handler(MIRBuilder, MF.getRegInfo(), Ret);
return determineAndHandleAssignments(Handler, Assigner, SplitRetInfos,
MIRBuilder, CC, F.isVarArg());
@@ -531,6 +521,7 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
CallingConv::ID CC = F.getCallingConv();
SmallVector<ArgInfo, 32> SplitArgInfos;
+ SmallVector<Type *, 4> TypeList;
unsigned Index = 0;
for (auto &Arg : F.args()) {
// Construct the ArgInfo object from destination register and argument type.
@@ -542,12 +533,16 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
// correspondingly and appended to SplitArgInfos.
splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
+ TypeList.push_back(Arg.getType());
+
++Index;
}
+ RVVArgDispatcher Dispatcher{&MF, getTLI<RISCVTargetLowering>(),
+ ArrayRef(TypeList)};
RISCVIncomingValueAssigner Assigner(
CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
- /*IsRet=*/false);
+ /*IsRet=*/false, Dispatcher);
RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
SmallVector<CCValAssign, 16> ArgLocs;
@@ -585,11 +580,13 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 32> SplitArgInfos;
SmallVector<ISD::OutputArg, 8> Outs;
+ SmallVector<Type *, 4> TypeList;
for (auto &AInfo : Info.OrigArgs) {
// Handle any required unmerging of split value types from a given VReg into
// physical registers. ArgInfo objects are constructed correspondingly and
// appended to SplitArgInfos.
splitToValueTypes(AInfo, SplitArgInfos, DL, CC);
+ TypeList.push_back(AInfo.Ty);
}
// TODO: Support tail calls.
@@ -607,9 +604,11 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
Call.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));
+ RVVArgDispatcher ArgDispatcher{&MF, getTLI<RISCVTargetLowering>(),
+ ArrayRef(TypeList)};
RISCVOutgoingValueAssigner ArgAssigner(
CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
- /*IsRet=*/false);
+ /*IsRet=*/false, ArgDispatcher);
RISCVOutgoingValueHandler ArgHandler(MIRBuilder, MF.getRegInfo(), Call);
if (!determineAndHandleAssignments(ArgHandler, ArgAssigner, SplitArgInfos,
MIRBuilder, CC, Info.IsVarArg))
@@ -637,9 +636,11 @@ bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
SmallVector<ArgInfo, 4> SplitRetInfos;
splitToValueTypes(Info.OrigRet, SplitRetInfos, DL, CC);
+ RVVArgDispatcher RetDispatcher{&MF, getTLI<RISCVTargetLowering>(),
+ ArrayRef(F.getReturnType())};
RISCVIncomingValueAssigner RetAssigner(
CC == CallingConv::Fast ? RISCV::CC_RISCV_FastCC : RISCV::CC_RISCV,
- /*IsRet=*/true);
+ /*IsRet=*/true, RetDispatcher);
RISCVCallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), Call);
if (!determineAndHandleAssignments(RetHandler, RetAssigner, SplitRetInfos,
MIRBuilder, CC, Info.IsVarArg))
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index ca77a97..c1fde73 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -142,46 +142,6 @@ static const RegisterBankInfo::ValueMapping *getFPValueMapping(unsigned Size) {
return &RISCV::ValueMappings[Idx];
}
-/// Returns whether opcode \p Opc is a pre-isel generic floating-point opcode,
-/// having only floating-point operands.
-/// FIXME: this is copied from target AArch64. Needs some code refactor here to
-/// put this function in GlobalISel/Utils.cpp.
-static bool isPreISelGenericFloatingPointOpcode(unsigned Opc) {
- switch (Opc) {
- case TargetOpcode::G_FADD:
- case TargetOpcode::G_FSUB:
- case TargetOpcode::G_FMUL:
- case TargetOpcode::G_FMA:
- case TargetOpcode::G_FDIV:
- case TargetOpcode::G_FCONSTANT:
- case TargetOpcode::G_FPEXT:
- case TargetOpcode::G_FPTRUNC:
- case TargetOpcode::G_FCEIL:
- case TargetOpcode::G_FFLOOR:
- case TargetOpcode::G_FNEARBYINT:
- case TargetOpcode::G_FNEG:
- case TargetOpcode::G_FCOPYSIGN:
- case TargetOpcode::G_FCOS:
- case TargetOpcode::G_FSIN:
- case TargetOpcode::G_FLOG10:
- case TargetOpcode::G_FLOG:
- case TargetOpcode::G_FLOG2:
- case TargetOpcode::G_FSQRT:
- case TargetOpcode::G_FABS:
- case TargetOpcode::G_FEXP:
- case TargetOpcode::G_FRINT:
- case TargetOpcode::G_INTRINSIC_TRUNC:
- case TargetOpcode::G_INTRINSIC_ROUND:
- case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
- case TargetOpcode::G_FMAXNUM:
- case TargetOpcode::G_FMINNUM:
- case TargetOpcode::G_FMAXIMUM:
- case TargetOpcode::G_FMINIMUM:
- return true;
- }
- return false;
-}
-
// TODO: Make this more like AArch64?
bool RISCVRegisterBankInfo::hasFPConstraints(
const MachineInstr &MI, const MachineRegisterInfo &MRI,
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 794455a..5996221 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1226,9 +1226,9 @@ def TuneNoSinkSplatOperands
"false", "Disable sink splat operands to enable .vx, .vf,"
".wx, and .wf instructions">;
-def TuneNoStripWSuffix
- : SubtargetFeature<"no-strip-w-suffix", "EnableStripWSuffix", "false",
- "Disable strip W suffix">;
+def TunePreferWInst
+ : SubtargetFeature<"prefer-w-inst", "PreferWInst", "true",
+ "Prefer instructions with W suffix">;
def TuneConditionalCompressedMoveFusion
: SubtargetFeature<"conditional-cmv-fusion", "HasConditionalCompressedMoveFusion",
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1d1ea6b..765838a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22,6 +22,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/VectorUtils.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
@@ -18223,33 +18224,12 @@ static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
return false;
}
-static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
- std::optional<unsigned> FirstMaskArgument,
- CCState &State, const RISCVTargetLowering &TLI) {
- const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
- if (RC == &RISCV::VRRegClass) {
- // Assign the first mask argument to V0.
- // This is an interim calling convention and it may be changed in the
- // future.
- if (FirstMaskArgument && ValNo == *FirstMaskArgument)
- return State.AllocateReg(RISCV::V0);
- return State.AllocateReg(ArgVRs);
- }
- if (RC == &RISCV::VRM2RegClass)
- return State.AllocateReg(ArgVRM2s);
- if (RC == &RISCV::VRM4RegClass)
- return State.AllocateReg(ArgVRM4s);
- if (RC == &RISCV::VRM8RegClass)
- return State.AllocateReg(ArgVRM8s);
- llvm_unreachable("Unhandled register class for ValueType");
-}
-
// Implements the RISC-V calling convention. Returns true upon failure.
bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument) {
+ RVVArgDispatcher &RVVDispatcher) {
unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
assert(XLen == 32 || XLen == 64);
MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
@@ -18418,7 +18398,7 @@ bool RISCV::CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
else if (ValVT == MVT::f64 && !UseGPRForF64)
Reg = State.AllocateReg(ArgFPR64s);
else if (ValVT.isVector()) {
- Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
+ Reg = RVVDispatcher.getNextPhysReg();
if (!Reg) {
// For return values, the vector must be passed fully via registers or
// via the stack.
@@ -18504,9 +18484,15 @@ void RISCVTargetLowering::analyzeInputArgs(
unsigned NumArgs = Ins.size();
FunctionType *FType = MF.getFunction().getFunctionType();
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions())
- FirstMaskArgument = preAssignMask(Ins);
+ RVVArgDispatcher Dispatcher;
+ if (IsRet) {
+ Dispatcher = RVVArgDispatcher{&MF, this, ArrayRef(Ins)};
+ } else {
+ SmallVector<Type *, 4> TypeList;
+ for (const Argument &Arg : MF.getFunction().args())
+ TypeList.push_back(Arg.getType());
+ Dispatcher = RVVArgDispatcher{&MF, this, ArrayRef(TypeList)};
+ }
for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = Ins[i].VT;
@@ -18521,7 +18507,7 @@ void RISCVTargetLowering::analyzeInputArgs(
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
- FirstMaskArgument)) {
+ Dispatcher)) {
LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
<< ArgVT << '\n');
llvm_unreachable(nullptr);
@@ -18535,9 +18521,13 @@ void RISCVTargetLowering::analyzeOutputArgs(
CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
unsigned NumArgs = Outs.size();
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions())
- FirstMaskArgument = preAssignMask(Outs);
+ SmallVector<Type *, 4> TypeList;
+ if (IsRet)
+ TypeList.push_back(MF.getFunction().getReturnType());
+ else if (CLI)
+ for (const TargetLowering::ArgListEntry &Arg : CLI->getArgs())
+ TypeList.push_back(Arg.Ty);
+ RVVArgDispatcher Dispatcher{&MF, this, ArrayRef(TypeList)};
for (unsigned i = 0; i != NumArgs; i++) {
MVT ArgVT = Outs[i].VT;
@@ -18547,7 +18537,7 @@ void RISCVTargetLowering::analyzeOutputArgs(
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
- FirstMaskArgument)) {
+ Dispatcher)) {
LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
<< ArgVT << "\n");
llvm_unreachable(nullptr);
@@ -18728,7 +18718,7 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
ISD::ArgFlagsTy ArgFlags, CCState &State,
bool IsFixed, bool IsRet, Type *OrigTy,
const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument) {
+ RVVArgDispatcher &RVVDispatcher) {
if (LocVT == MVT::i32 || LocVT == MVT::i64) {
if (unsigned Reg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
@@ -18806,13 +18796,14 @@ bool RISCV::CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
}
if (LocVT.isVector()) {
- if (unsigned Reg =
- allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
+ MCPhysReg AllocatedVReg = RVVDispatcher.getNextPhysReg();
+ if (AllocatedVReg) {
// Fixed-length vectors are located in the corresponding scalable-vector
// container types.
if (ValVT.isFixedLengthVector())
LocVT = TLI.getContainerForFixedLengthVector(LocVT);
- State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ State.addLoc(
+ CCValAssign::getReg(ValNo, ValVT, AllocatedVReg, LocVT, LocInfo));
} else {
// Try and pass the address via a "fast" GPR.
if (unsigned GPRReg = State.AllocateReg(getFastCCArgGPRs(ABI))) {
@@ -19440,17 +19431,15 @@ bool RISCVTargetLowering::CanLowerReturn(
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
- std::optional<unsigned> FirstMaskArgument;
- if (Subtarget.hasVInstructions())
- FirstMaskArgument = preAssignMask(Outs);
+ RVVArgDispatcher Dispatcher{&MF, this, ArrayRef(Outs)};
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
MVT VT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
if (RISCV::CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
- ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
- *this, FirstMaskArgument))
+ ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true,
+ nullptr, *this, Dispatcher))
return false;
}
return true;
@@ -21247,6 +21236,181 @@ unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {
return Subtarget.getMinimumJumpTableEntries();
}
+// Handle a single argument, such as a return value.
+template <typename Arg>
+void RVVArgDispatcher::constructArgInfos(ArrayRef<Arg> ArgList) {
+ // This lambda determines whether an array of types are constructed by
+ // homogeneous vector types.
+ auto isHomogeneousScalableVectorType = [](ArrayRef<Arg> ArgList) {
+ auto It = ArgList.begin();
+
+ // Bail out if the list is empty or the first type needs to be split.
+ if (It == ArgList.end() || It->Flags.isSplit())
+ return false;
+
+ // First, extract the type of the first element.
+ MVT FirstArgRegType = It->VT;
+
+ ++It;
+
+ // Return false if this argument contains only one element, or its type is
+ // not a scalable vector.
+ if (It == ArgList.end() || !FirstArgRegType.isScalableVector())
+ return false;
+
+ // Second, check if the following elements in this argument type are all the
+ // same.
+ for (; It != ArgList.end(); ++It)
+ if (It->Flags.isSplit() || It->VT != FirstArgRegType)
+ return false;
+
+ return true;
+ };
+
+ if (isHomogeneousScalableVectorType(ArgList)) {
+ // Handle as tuple type
+ RVVArgInfos.push_back({(unsigned)ArgList.size(), ArgList[0].VT, false});
+ } else {
+ // Handle as normal vector type
+ bool FirstVMaskAssigned = false;
+ for (const auto &OutArg : ArgList) {
+ MVT RegisterVT = OutArg.VT;
+
+ // Skip non-RVV register type
+ if (!RegisterVT.isVector())
+ continue;
+
+ if (RegisterVT.isFixedLengthVector())
+ RegisterVT = TLI->getContainerForFixedLengthVector(RegisterVT);
+
+ if (!FirstVMaskAssigned && RegisterVT.getVectorElementType() == MVT::i1) {
+ RVVArgInfos.push_back({1, RegisterVT, true});
+ FirstVMaskAssigned = true;
+ continue;
+ }
+
+ RVVArgInfos.push_back({1, RegisterVT, false});
+ }
+ }
+}
+
+// Handle multiple arguments.
+template <>
+void RVVArgDispatcher::constructArgInfos<Type *>(ArrayRef<Type *> TypeList) {
+ const DataLayout &DL = MF->getDataLayout();
+ const Function &F = MF->getFunction();
+ LLVMContext &Context = F.getContext();
+
+ bool FirstVMaskAssigned = false;
+ for (Type *Ty : TypeList) {
+ StructType *STy = dyn_cast<StructType>(Ty);
+ if (STy && STy->containsHomogeneousScalableVectorTypes()) {
+ Type *ElemTy = STy->getTypeAtIndex(0U);
+ EVT VT = TLI->getValueType(DL, ElemTy);
+ MVT RegisterVT =
+ TLI->getRegisterTypeForCallingConv(Context, F.getCallingConv(), VT);
+ unsigned NumRegs =
+ TLI->getNumRegistersForCallingConv(Context, F.getCallingConv(), VT);
+
+ RVVArgInfos.push_back(
+ {NumRegs * STy->getNumElements(), RegisterVT, false});
+ } else {
+ SmallVector<EVT, 4> ValueVTs;
+ ComputeValueVTs(*TLI, DL, Ty, ValueVTs);
+
+ for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
+ ++Value) {
+ EVT VT = ValueVTs[Value];
+ MVT RegisterVT =
+ TLI->getRegisterTypeForCallingConv(Context, F.getCallingConv(), VT);
+ unsigned NumRegs =
+ TLI->getNumRegistersForCallingConv(Context, F.getCallingConv(), VT);
+
+ // Skip non-RVV register type
+ if (!RegisterVT.isVector())
+ continue;
+
+ if (RegisterVT.isFixedLengthVector())
+ RegisterVT = TLI->getContainerForFixedLengthVector(RegisterVT);
+
+ if (!FirstVMaskAssigned &&
+ RegisterVT.getVectorElementType() == MVT::i1) {
+ RVVArgInfos.push_back({1, RegisterVT, true});
+ FirstVMaskAssigned = true;
+ --NumRegs;
+ }
+
+ RVVArgInfos.insert(RVVArgInfos.end(), NumRegs, {1, RegisterVT, false});
+ }
+ }
+ }
+}
+
+void RVVArgDispatcher::allocatePhysReg(unsigned NF, unsigned LMul,
+ unsigned StartReg) {
+ assert((StartReg % LMul) == 0 &&
+ "Start register number should be multiple of lmul");
+ const MCPhysReg *VRArrays;
+ switch (LMul) {
+ default:
+ report_fatal_error("Invalid lmul");
+ case 1:
+ VRArrays = ArgVRs;
+ break;
+ case 2:
+ VRArrays = ArgVRM2s;
+ break;
+ case 4:
+ VRArrays = ArgVRM4s;
+ break;
+ case 8:
+ VRArrays = ArgVRM8s;
+ break;
+ }
+
+ for (unsigned i = 0; i < NF; ++i)
+ if (StartReg)
+ AllocatedPhysRegs.push_back(VRArrays[(StartReg - 8) / LMul + i]);
+ else
+ AllocatedPhysRegs.push_back(MCPhysReg());
+}
+
+/// This function determines whether each RVV argument is passed by register.
+/// If an argument can be assigned to a VR, give it that specific register;
+/// otherwise, assign it 0, which is an invalid MCPhysReg.
+void RVVArgDispatcher::compute() {
+ uint32_t AssignedMap = 0;
+ auto allocate = [&](const RVVArgInfo &ArgInfo) {
+ // Allocate first vector mask argument to V0.
+ if (ArgInfo.FirstVMask) {
+ AllocatedPhysRegs.push_back(RISCV::V0);
+ return;
+ }
+
+ unsigned RegsNeeded = divideCeil(
+ ArgInfo.VT.getSizeInBits().getKnownMinValue(), RISCV::RVVBitsPerBlock);
+ unsigned TotalRegsNeeded = ArgInfo.NF * RegsNeeded;
+ for (unsigned StartReg = 0; StartReg + TotalRegsNeeded <= NumArgVRs;
+ StartReg += RegsNeeded) {
+ uint32_t Map = ((1 << TotalRegsNeeded) - 1) << StartReg;
+ if ((AssignedMap & Map) == 0) {
+ allocatePhysReg(ArgInfo.NF, RegsNeeded, StartReg + 8);
+ AssignedMap |= Map;
+ return;
+ }
+ }
+
+ allocatePhysReg(ArgInfo.NF, RegsNeeded, 0);
+ };
+
+ for (unsigned i = 0; i < RVVArgInfos.size(); ++i)
+ allocate(RVVArgInfos[i]);
+}
+
+MCPhysReg RVVArgDispatcher::getNextPhysReg() {
+ assert(CurIdx < AllocatedPhysRegs.size() && "Index out of range");
+ return AllocatedPhysRegs[CurIdx++];
+}
+
namespace llvm::RISCVVIntrinsicsTable {
#define GET_RISCVVIntrinsicsTable_IMPL
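
A worked example of compute() above (assuming the argument register file is v8-v23, i.e. NumArgVRs = 16, as documented in the header change below): for the argument sequence (mask, LMUL=4 data, LMUL=1 data), the first mask takes V0; the LMUL=4 argument needs RegsNeeded = 4, finds StartReg = 0 free, marks bits 0-3, and is assigned v8m4; the LMUL=1 argument then scans StartReg = 0..3 (all taken) and lands on StartReg = 4, i.e. v12. An argument that does not fit gets MCPhysReg() (0), which CC_RISCV above treats as a signal to pass the value indirectly.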
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index e263373..b10da3d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -24,6 +24,7 @@ namespace llvm {
class InstructionCost;
class RISCVSubtarget;
struct RISCVRegisterInfo;
+class RVVArgDispatcher;
namespace RISCVISD {
// clang-format off
@@ -875,7 +876,7 @@ public:
ISD::ArgFlagsTy ArgFlags, CCState &State,
bool IsFixed, bool IsRet, Type *OrigTy,
const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument);
+ RVVArgDispatcher &RVVDispatcher);
private:
void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
@@ -1017,19 +1018,71 @@ private:
unsigned getMinimumJumpTableEntries() const override;
};
+/// As per the spec, the rules for passing vector arguments are as follows:
+///
+/// 1. For the first vector mask argument, use v0 to pass it.
+/// 2. For vector data arguments or the remaining vector mask arguments,
+/// starting from the v8 register, if a vector register group between v8-v23
+/// that has not been allocated can be found and the first register number is a
+/// multiple of LMUL, then allocate this vector register group to the argument
+/// and mark these registers as allocated. Otherwise, the argument is passed by
+/// reference and is replaced in the argument list with the address.
+/// 3. For tuple vector data arguments, starting from the v8 register, if
+/// NFIELDS consecutive vector register groups between v8-v23 that have not
+/// been allocated can be found and the first register number is a multiple of
+/// LMUL, then allocate these vector register groups to the argument and mark
+/// these registers as allocated. Otherwise, the argument is passed by
+/// reference and is replaced in the argument list with the address.
+class RVVArgDispatcher {
+public:
+ static constexpr unsigned NumArgVRs = 16;
+
+ struct RVVArgInfo {
+ unsigned NF;
+ MVT VT;
+ bool FirstVMask = false;
+ };
+
+ template <typename Arg>
+ RVVArgDispatcher(const MachineFunction *MF, const RISCVTargetLowering *TLI,
+ ArrayRef<Arg> ArgList)
+ : MF(MF), TLI(TLI) {
+ constructArgInfos(ArgList);
+ compute();
+ }
+
+ RVVArgDispatcher() = default;
+
+ MCPhysReg getNextPhysReg();
+
+private:
+ SmallVector<RVVArgInfo, 4> RVVArgInfos;
+ SmallVector<MCPhysReg, 4> AllocatedPhysRegs;
+
+ const MachineFunction *MF = nullptr;
+ const RISCVTargetLowering *TLI = nullptr;
+
+ unsigned CurIdx = 0;
+
+ template <typename Arg> void constructArgInfos(ArrayRef<Arg> Ret);
+ void compute();
+ void allocatePhysReg(unsigned NF = 1, unsigned LMul = 1,
+ unsigned StartReg = 0);
+};
+
namespace RISCV {
bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument);
+ RVVArgDispatcher &RVVDispatcher);
bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
- std::optional<unsigned> FirstMaskArgument);
+ RVVArgDispatcher &RVVDispatcher);
bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
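
To make rule 2's alignment constraint concrete (an illustrative sequence, not from this commit's tests): if an LMUL=1 argument has taken v8, a following LMUL=2 argument cannot start at v9, since 9 is not a multiple of 2, so it is allocated v10-v11; a later LMUL=1 argument can then back-fill v9.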
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index b0fda040..668062c 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2719,6 +2719,50 @@ std::string RISCVInstrInfo::createMIROperandComment(
}
// clang-format off
+#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
+ RISCV::Pseudo##OP##_##LMUL
+
+#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
+ RISCV::Pseudo##OP##_##LMUL##_MASK
+
+#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
+ CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
+ case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
+
+#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
+ CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
+ case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
+ case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
+ case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
+ case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
+ case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
+
+#define CASE_RVV_OPCODE_UNMASK(OP) \
+ CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
+ case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
+
+#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
+ CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
+ case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
+ case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
+ case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
+ case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
+ case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
+
+#define CASE_RVV_OPCODE_MASK(OP) \
+ CASE_RVV_OPCODE_MASK_WIDEN(OP): \
+ case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
+
+#define CASE_RVV_OPCODE_WIDEN(OP) \
+ CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
+ case CASE_RVV_OPCODE_MASK_WIDEN(OP)
+
+#define CASE_RVV_OPCODE(OP) \
+ CASE_RVV_OPCODE_UNMASK(OP): \
+ case CASE_RVV_OPCODE_MASK(OP)
+// clang-format on
+
+// clang-format off
#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
RISCV::PseudoV##OP##_##TYPE##_##LMUL
@@ -2798,6 +2842,28 @@ bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
case RISCV::PseudoCCMOVGPR:
// Operands 4 and 5 are commutable.
return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
+ case CASE_RVV_OPCODE(VADD_VV):
+ case CASE_RVV_OPCODE(VAND_VV):
+ case CASE_RVV_OPCODE(VOR_VV):
+ case CASE_RVV_OPCODE(VXOR_VV):
+ case CASE_RVV_OPCODE_MASK(VMSEQ_VV):
+ case CASE_RVV_OPCODE_MASK(VMSNE_VV):
+ case CASE_RVV_OPCODE(VMIN_VV):
+ case CASE_RVV_OPCODE(VMINU_VV):
+ case CASE_RVV_OPCODE(VMAX_VV):
+ case CASE_RVV_OPCODE(VMAXU_VV):
+ case CASE_RVV_OPCODE(VMUL_VV):
+ case CASE_RVV_OPCODE(VMULH_VV):
+ case CASE_RVV_OPCODE(VMULHU_VV):
+ case CASE_RVV_OPCODE_WIDEN(VWADD_VV):
+ case CASE_RVV_OPCODE_WIDEN(VWADDU_VV):
+ case CASE_RVV_OPCODE_WIDEN(VWMUL_VV):
+ case CASE_RVV_OPCODE_WIDEN(VWMULU_VV):
+ case CASE_RVV_OPCODE_WIDEN(VWMACC_VV):
+ case CASE_RVV_OPCODE_WIDEN(VWMACCU_VV):
+ case CASE_RVV_OPCODE_UNMASK(VADC_VVM):
+ // Operands 2 and 3 are commutable.
+ return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
case CASE_VFMA_SPLATS(FMADD):
case CASE_VFMA_SPLATS(FMSUB):
case CASE_VFMA_SPLATS(FMACC):
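
For reference, CASE_RVV_OPCODE(VADD_VV) above expands to fourteen case labels, the unmasked and masked pseudos at every LMUL (middle LMULs elided here):

    case RISCV::PseudoVADD_VV_MF8: /* ... */ case RISCV::PseudoVADD_VV_M8:
    case RISCV::PseudoVADD_VV_MF8_MASK: /* ... */ case RISCV::PseudoVADD_VV_M8_MASK:

while CASE_RVV_OPCODE_MASK covers only the _MASK forms and the _WIDEN variants stop at M4, matching the LMUL range of the widening pseudos.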
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 686bfd1..0b83179 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2129,8 +2129,9 @@ multiclass VPseudoBinary<VReg RetClass,
LMULInfo MInfo,
string Constraint = "",
int sew = 0,
- int TargetConstraintType = 1> {
- let VLMul = MInfo.value, SEW=sew in {
+ int TargetConstraintType = 1,
+ bit Commutable = 0> {
+ let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
Constraint, TargetConstraintType>;
@@ -2169,8 +2170,9 @@ multiclass VPseudoBinaryM<VReg RetClass,
DAGOperand Op2Class,
LMULInfo MInfo,
string Constraint = "",
- int TargetConstraintType = 1> {
- let VLMul = MInfo.value in {
+ int TargetConstraintType = 1,
+ bit Commutable = 0> {
+ let VLMul = MInfo.value, isCommutable = Commutable in {
def "_" # MInfo.MX : VPseudoBinaryMOutNoMask<RetClass, Op1Class, Op2Class,
Constraint, TargetConstraintType>;
let ForceTailAgnostic = true in
@@ -2228,8 +2230,8 @@ multiclass VPseudoTiedBinaryRoundingMode<VReg RetClass,
}
-multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = "", int sew = 0> {
- defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew>;
+multiclass VPseudoBinaryV_VV<LMULInfo m, string Constraint = "", int sew = 0, bit Commutable = 0> {
+ defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew, Commutable=Commutable>;
}
multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = ""> {
@@ -2333,9 +2335,10 @@ multiclass VPseudoVALU_MM<bit Commutable = 0> {
// * The destination EEW is greater than the source EEW, the source EMUL is
// at least 1, and the overlap is in the highest-numbered part of the
// destination register group is legal. Otherwise, it is illegal.
-multiclass VPseudoBinaryW_VV<LMULInfo m> {
+multiclass VPseudoBinaryW_VV<LMULInfo m, bit Commutable = 0> {
defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
- "@earlyclobber $rd", TargetConstraintType=3>;
+ "@earlyclobber $rd", TargetConstraintType=3,
+ Commutable=Commutable>;
}
multiclass VPseudoBinaryW_VV_RM<LMULInfo m, int sew = 0> {
@@ -2455,7 +2458,9 @@ multiclass VPseudoBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
m.vrclass, m.vrclass, m, CarryIn, Constraint, TargetConstraintType>;
}
-multiclass VPseudoTiedBinaryV_VM<LMULInfo m, int TargetConstraintType = 1> {
+multiclass VPseudoTiedBinaryV_VM<LMULInfo m, int TargetConstraintType = 1,
+ bit Commutable = 0> {
+ let isCommutable = Commutable in
def "_VVM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, m.vrclass, m, 1, "",
@@ -2669,8 +2674,10 @@ multiclass PseudoVEXT_VF8 {
// lowest-numbered part of the source register group".
// With LMUL<=1 the source and dest occupy a single register so any overlap
// is in the lowest-numbered part.
-multiclass VPseudoBinaryM_VV<LMULInfo m, int TargetConstraintType = 1> {
- defm _VV : VPseudoBinaryM<m.moutclass, m.vrclass, m.vrclass, m, "", TargetConstraintType>;
+multiclass VPseudoBinaryM_VV<LMULInfo m, int TargetConstraintType = 1,
+ bit Commutable = 0> {
+ defm _VV : VPseudoBinaryM<m.moutclass, m.vrclass, m.vrclass, m, "",
+ TargetConstraintType, Commutable=Commutable>;
}
multiclass VPseudoBinaryM_VX<LMULInfo m, int TargetConstraintType = 1> {
@@ -2749,10 +2756,11 @@ multiclass VPseudoVSSHT_VV_VX_VI_RM<Operand ImmType = simm5, string Constraint =
}
}
-multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = "",
+ bit Commutable = 0> {
foreach m = MxList in {
defvar mx = m.MX;
- defm "" : VPseudoBinaryV_VV<m, Constraint>,
+ defm "" : VPseudoBinaryV_VV<m, Constraint, Commutable=Commutable>,
SchedBinary<"WriteVIALUV", "ReadVIALUV", "ReadVIALUV", mx,
forceMergeOpRead=true>;
defm "" : VPseudoBinaryV_VX<m, Constraint>,
@@ -2802,17 +2810,17 @@ multiclass VPseudoVAALU_VV_VX_RM {
multiclass VPseudoVMINMAX_VV_VX {
foreach m = MxList in {
defvar mx = m.MX;
- defm "" : VPseudoBinaryV_VV<m>,
+ defm "" : VPseudoBinaryV_VV<m, Commutable=1>,
SchedBinary<"WriteVIMinMaxV", "ReadVIMinMaxV", "ReadVIMinMaxV", mx>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVIMinMaxX", "ReadVIMinMaxV", "ReadVIMinMaxX", mx>;
}
}
-multiclass VPseudoVMUL_VV_VX {
+multiclass VPseudoVMUL_VV_VX<bit Commutable = 0> {
foreach m = MxList in {
defvar mx = m.MX;
- defm "" : VPseudoBinaryV_VV<m>,
+ defm "" : VPseudoBinaryV_VV<m, Commutable=Commutable>,
SchedBinary<"WriteVIMulV", "ReadVIMulV", "ReadVIMulV", mx>;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVIMulX", "ReadVIMulV", "ReadVIMulX", mx>;
@@ -2962,10 +2970,10 @@ multiclass VPseudoVALU_VX_VI<Operand ImmType = simm5> {
}
}
-multiclass VPseudoVWALU_VV_VX {
+multiclass VPseudoVWALU_VV_VX<bit Commutable = 0> {
foreach m = MxListW in {
defvar mx = m.MX;
- defm "" : VPseudoBinaryW_VV<m>,
+ defm "" : VPseudoBinaryW_VV<m, Commutable=Commutable>,
SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", mx,
forceMergeOpRead=true>;
defm "" : VPseudoBinaryW_VX<m>,
@@ -2974,10 +2982,10 @@ multiclass VPseudoVWALU_VV_VX {
}
}
-multiclass VPseudoVWMUL_VV_VX {
+multiclass VPseudoVWMUL_VV_VX<bit Commutable = 0> {
foreach m = MxListW in {
defvar mx = m.MX;
- defm "" : VPseudoBinaryW_VV<m>,
+ defm "" : VPseudoBinaryW_VV<m, Commutable=Commutable>,
SchedBinary<"WriteVIWMulV", "ReadVIWMulV", "ReadVIWMulV", mx,
forceMergeOpRead=true>;
defm "" : VPseudoBinaryW_VX<m>,
@@ -3072,7 +3080,7 @@ multiclass VPseudoVMRG_VM_XM_IM {
multiclass VPseudoVCALU_VM_XM_IM {
foreach m = MxList in {
defvar mx = m.MX;
- defm "" : VPseudoTiedBinaryV_VM<m>,
+ defm "" : VPseudoTiedBinaryV_VM<m, Commutable=1>,
SchedBinary<"WriteVICALUV", "ReadVICALUV", "ReadVICALUV", mx,
forceMergeOpRead=true>;
defm "" : VPseudoTiedBinaryV_XM<m>,
@@ -3285,10 +3293,10 @@ multiclass VPseudoTernaryV_VF_AAXA_RM<LMULInfo m, FPR_Info f,
sew, Commutable=1>;
}
-multiclass VPseudoTernaryW_VV<LMULInfo m> {
+multiclass VPseudoTernaryW_VV<LMULInfo m, bit Commutable = 0> {
defvar constraint = "@earlyclobber $rd";
defm _VV : VPseudoTernaryWithPolicy<m.wvrclass, m.vrclass, m.vrclass, m,
- constraint, /*Commutable*/ 0, TargetConstraintType=3>;
+ constraint, Commutable=Commutable, TargetConstraintType=3>;
}
multiclass VPseudoTernaryW_VV_RM<LMULInfo m, int sew = 0> {
@@ -3378,10 +3386,10 @@ multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
}
}
-multiclass VPseudoVWMAC_VV_VX {
+multiclass VPseudoVWMAC_VV_VX<bit Commutable = 0> {
foreach m = MxListW in {
defvar mx = m.MX;
- defm "" : VPseudoTernaryW_VV<m>,
+ defm "" : VPseudoTernaryW_VV<m, Commutable=Commutable>,
SchedTernary<"WriteVIWMulAddV", "ReadVIWMulAddV", "ReadVIWMulAddV",
"ReadVIWMulAddV", mx>;
defm "" : VPseudoTernaryW_VX<m>,
@@ -3434,10 +3442,10 @@ multiclass VPseudoVWMAC_VV_VF_BF_RM {
}
}
-multiclass VPseudoVCMPM_VV_VX_VI {
+multiclass VPseudoVCMPM_VV_VX_VI<bit Commutable = 0> {
foreach m = MxList in {
defvar mx = m.MX;
- defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2>,
+ defm "" : VPseudoBinaryM_VV<m, TargetConstraintType=2, Commutable=Commutable>,
SchedBinary<"WriteVICmpV", "ReadVICmpV", "ReadVICmpV", mx>;
defm "" : VPseudoBinaryM_VX<m, TargetConstraintType=2>,
SchedBinary<"WriteVICmpX", "ReadVICmpV", "ReadVICmpX", mx>;
@@ -6246,7 +6254,7 @@ defm PseudoVLSEG : VPseudoUSSegLoadFF;
//===----------------------------------------------------------------------===//
// 11.1. Vector Single-Width Integer Add and Subtract
//===----------------------------------------------------------------------===//
-defm PseudoVADD : VPseudoVALU_VV_VX_VI;
+defm PseudoVADD : VPseudoVALU_VV_VX_VI<Commutable=1>;
defm PseudoVSUB : VPseudoVALU_VV_VX;
defm PseudoVRSUB : VPseudoVALU_VX_VI;
@@ -6311,9 +6319,9 @@ foreach vti = AllIntegerVectors in {
//===----------------------------------------------------------------------===//
// 11.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
-defm PseudoVWADDU : VPseudoVWALU_VV_VX;
+defm PseudoVWADDU : VPseudoVWALU_VV_VX<Commutable=1>;
defm PseudoVWSUBU : VPseudoVWALU_VV_VX;
-defm PseudoVWADD : VPseudoVWALU_VV_VX;
+defm PseudoVWADD : VPseudoVWALU_VV_VX<Commutable=1>;
defm PseudoVWSUB : VPseudoVWALU_VV_VX;
defm PseudoVWADDU : VPseudoVWALU_WV_WX;
defm PseudoVWSUBU : VPseudoVWALU_WV_WX;
@@ -6344,9 +6352,9 @@ defm PseudoVMSBC : VPseudoVCALUM_V_X<"@earlyclobber $rd">;
//===----------------------------------------------------------------------===//
// 11.5. Vector Bitwise Logical Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVAND : VPseudoVALU_VV_VX_VI;
-defm PseudoVOR : VPseudoVALU_VV_VX_VI;
-defm PseudoVXOR : VPseudoVALU_VV_VX_VI;
+defm PseudoVAND : VPseudoVALU_VV_VX_VI<Commutable=1>;
+defm PseudoVOR : VPseudoVALU_VV_VX_VI<Commutable=1>;
+defm PseudoVXOR : VPseudoVALU_VV_VX_VI<Commutable=1>;
//===----------------------------------------------------------------------===//
// 11.6. Vector Single-Width Bit Shift Instructions
@@ -6364,8 +6372,8 @@ defm PseudoVNSRA : VPseudoVNSHT_WV_WX_WI;
//===----------------------------------------------------------------------===//
// 11.8. Vector Integer Comparison Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVMSEQ : VPseudoVCMPM_VV_VX_VI;
-defm PseudoVMSNE : VPseudoVCMPM_VV_VX_VI;
+defm PseudoVMSEQ : VPseudoVCMPM_VV_VX_VI<Commutable=1>;
+defm PseudoVMSNE : VPseudoVCMPM_VV_VX_VI<Commutable=1>;
defm PseudoVMSLTU : VPseudoVCMPM_VV_VX;
defm PseudoVMSLT : VPseudoVCMPM_VV_VX;
defm PseudoVMSLEU : VPseudoVCMPM_VV_VX_VI;
@@ -6384,9 +6392,9 @@ defm PseudoVMAX : VPseudoVMINMAX_VV_VX;
//===----------------------------------------------------------------------===//
// 11.10. Vector Single-Width Integer Multiply Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVMUL : VPseudoVMUL_VV_VX;
-defm PseudoVMULH : VPseudoVMUL_VV_VX;
-defm PseudoVMULHU : VPseudoVMUL_VV_VX;
+defm PseudoVMUL : VPseudoVMUL_VV_VX<Commutable=1>;
+defm PseudoVMULH : VPseudoVMUL_VV_VX<Commutable=1>;
+defm PseudoVMULHU : VPseudoVMUL_VV_VX<Commutable=1>;
defm PseudoVMULHSU : VPseudoVMUL_VV_VX;
//===----------------------------------------------------------------------===//
@@ -6400,8 +6408,8 @@ defm PseudoVREM : VPseudoVDIV_VV_VX;
//===----------------------------------------------------------------------===//
// 11.12. Vector Widening Integer Multiply Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVWMUL : VPseudoVWMUL_VV_VX;
-defm PseudoVWMULU : VPseudoVWMUL_VV_VX;
+defm PseudoVWMUL : VPseudoVWMUL_VV_VX<Commutable=1>;
+defm PseudoVWMULU : VPseudoVWMUL_VV_VX<Commutable=1>;
defm PseudoVWMULSU : VPseudoVWMUL_VV_VX;
//===----------------------------------------------------------------------===//
@@ -6415,8 +6423,8 @@ defm PseudoVNMSUB : VPseudoVMAC_VV_VX_AAXA;
//===----------------------------------------------------------------------===//
// 11.14. Vector Widening Integer Multiply-Add Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVWMACCU : VPseudoVWMAC_VV_VX;
-defm PseudoVWMACC : VPseudoVWMAC_VV_VX;
+defm PseudoVWMACCU : VPseudoVWMAC_VV_VX<Commutable=1>;
+defm PseudoVWMACC : VPseudoVWMAC_VV_VX<Commutable=1>;
defm PseudoVWMACCSU : VPseudoVWMAC_VV_VX;
defm PseudoVWMACCUS : VPseudoVWMAC_VX;
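The Commutable bit threaded through these multiclasses ends up on the pseudo instruction descriptors, where generic machine passes (CSE, two-address lowering, the register coalescer) are then allowed to swap the two source operands. As a minimal, self-contained C++ sketch of why that matters, here is a CSE-style value-numbering key that canonicalizes operand order on commutable operations; the names and framing are mine, not LLVM's:

```cpp
#include <cassert>
#include <tuple>
#include <utility>

// Value-numbering key: (opcode, lhs vreg, rhs vreg).
using Key = std::tuple<int, unsigned, unsigned>;

// For commutable instructions, sort the source operands into a canonical
// order so that syntactically swapped instructions produce the same key.
Key canonicalKey(int Opc, unsigned L, unsigned R, bool Commutable) {
  if (Commutable && R < L)
    std::swap(L, R);
  return {Opc, L, R};
}

int main() {
  // vadd v2,v3 and vadd v3,v2 should match; vsub v2,v3 and vsub v3,v2 must not.
  assert(canonicalKey(/*VADD*/ 1, 2, 3, true) == canonicalKey(1, 3, 2, true));
  assert(canonicalKey(/*VSUB*/ 2, 2, 3, false) != canonicalKey(2, 3, 2, false));
}
```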
diff --git a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
index 39d420c..ead91c5 100644
--- a/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
+++ b/llvm/lib/Target/RISCV/RISCVOptWInstrs.cpp
@@ -12,15 +12,24 @@
// extended bits aren't consumed or because the input was already sign extended
// by an earlier instruction.
//
-// Then it removes the -w suffix from opw instructions whenever all users are
-// dependent only on the lower word of the result of the instruction.
-// The cases handled are:
-// * addw because c.add has a larger register encoding than c.addw.
-// * addiw because it helps reduce test differences between RV32 and RV64
-// w/o being a pessimization.
-// * mulw because c.mulw doesn't exist but c.mul does (w/ zcb)
-// * slliw because c.slliw doesn't exist and c.slli does
+// Then:
+// 1. Unless explicitly disabled or the target prefers instructions with the W suffix,
+// it removes the -w suffix from opw instructions whenever all users are
+// dependent only on the lower word of the result of the instruction.
+// The cases handled are:
+// * addw because c.add has a larger register encoding than c.addw.
+// * addiw because it helps reduce test differences between RV32 and RV64
+// w/o being a pessimization.
+// * mulw because c.mulw doesn't exist but c.mul does (w/ zcb)
+// * slliw because c.slliw doesn't exist and c.slli does
//
+// 2. Otherwise, if explicitly enabled or the target prefers instructions with the W suffix,
+// it adds the W suffix to the instruction whenever all users are dependent
+// only on the lower word of the result of the instruction.
+// The cases handled are:
+// * add/addi/sub/mul.
+// * slli with imm < 32.
+// * ld/lwu.
//===---------------------------------------------------------------------===//
#include "RISCV.h"
@@ -60,6 +69,8 @@ public:
const RISCVSubtarget &ST, MachineRegisterInfo &MRI);
bool stripWSuffixes(MachineFunction &MF, const RISCVInstrInfo &TII,
const RISCVSubtarget &ST, MachineRegisterInfo &MRI);
+ bool appendWSuffixes(MachineFunction &MF, const RISCVInstrInfo &TII,
+ const RISCVSubtarget &ST, MachineRegisterInfo &MRI);
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
@@ -672,9 +683,6 @@ bool RISCVOptWInstrs::stripWSuffixes(MachineFunction &MF,
const RISCVInstrInfo &TII,
const RISCVSubtarget &ST,
MachineRegisterInfo &MRI) {
- if (DisableStripWSuffix || !ST.enableStripWSuffix())
- return false;
-
bool MadeChange = false;
for (MachineBasicBlock &MBB : MF) {
for (MachineInstr &MI : MBB) {
@@ -698,6 +706,58 @@ bool RISCVOptWInstrs::stripWSuffixes(MachineFunction &MF,
return MadeChange;
}
+bool RISCVOptWInstrs::appendWSuffixes(MachineFunction &MF,
+ const RISCVInstrInfo &TII,
+ const RISCVSubtarget &ST,
+ MachineRegisterInfo &MRI) {
+ bool MadeChange = false;
+ for (MachineBasicBlock &MBB : MF) {
+ for (MachineInstr &MI : MBB) {
+ unsigned WOpc;
+ // TODO: Add more?
+ switch (MI.getOpcode()) {
+ default:
+ continue;
+ case RISCV::ADD:
+ WOpc = RISCV::ADDW;
+ break;
+ case RISCV::ADDI:
+ WOpc = RISCV::ADDIW;
+ break;
+ case RISCV::SUB:
+ WOpc = RISCV::SUBW;
+ break;
+ case RISCV::MUL:
+ WOpc = RISCV::MULW;
+ break;
+ case RISCV::SLLI:
+      // SLLIW reads the lowest 5 bits of the shift amount; SLLI reads the lowest 6 bits.
+ if (MI.getOperand(2).getImm() >= 32)
+ continue;
+ WOpc = RISCV::SLLIW;
+ break;
+ case RISCV::LD:
+ case RISCV::LWU:
+ WOpc = RISCV::LW;
+ break;
+ }
+
+ if (hasAllWUsers(MI, ST, MRI)) {
+ LLVM_DEBUG(dbgs() << "Replacing " << MI);
+ MI.setDesc(TII.get(WOpc));
+ MI.clearFlag(MachineInstr::MIFlag::NoSWrap);
+ MI.clearFlag(MachineInstr::MIFlag::NoUWrap);
+ MI.clearFlag(MachineInstr::MIFlag::IsExact);
+ LLVM_DEBUG(dbgs() << " with " << MI);
+ ++NumTransformedToWInstrs;
+ MadeChange = true;
+ }
+ }
+ }
+
+ return MadeChange;
+}
+
bool RISCVOptWInstrs::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
@@ -711,7 +771,12 @@ bool RISCVOptWInstrs::runOnMachineFunction(MachineFunction &MF) {
bool MadeChange = false;
MadeChange |= removeSExtWInstrs(MF, TII, ST, MRI);
- MadeChange |= stripWSuffixes(MF, TII, ST, MRI);
+
+ if (!(DisableStripWSuffix || ST.preferWInst()))
+ MadeChange |= stripWSuffixes(MF, TII, ST, MRI);
+
+ if (ST.preferWInst())
+ MadeChange |= appendWSuffixes(MF, TII, ST, MRI);
return MadeChange;
}
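The opcode table in appendWSuffixes is small enough to model standalone. Below is a sketch of just the mapping, under the assumption that hasAllWUsers (the real legality gate, which checks that every user consumes only the low 32 bits) has already said yes; the enum and helper are mine, not LLVM's API:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

enum class Opc { ADD, ADDI, SUB, MUL, SLLI, LD, LWU,
                 ADDW, ADDIW, SUBW, MULW, SLLIW, LW };

// W-variant of Op, if one exists. ShAmt only matters for SLLI: SLLIW takes a
// 5-bit shift amount, while SLLI takes 6 bits, so shifts >= 32 are rejected.
// LD and LWU both narrow to LW when only the low word is observed.
std::optional<Opc> wVariant(Opc Op, int64_t ShAmt = 0) {
  switch (Op) {
  case Opc::ADD:  return Opc::ADDW;
  case Opc::ADDI: return Opc::ADDIW;
  case Opc::SUB:  return Opc::SUBW;
  case Opc::MUL:  return Opc::MULW;
  case Opc::SLLI: return ShAmt < 32 ? std::optional(Opc::SLLIW) : std::nullopt;
  case Opc::LD:
  case Opc::LWU:  return Opc::LW;
  default:        return std::nullopt;
  }
}

int main() {
  assert(wVariant(Opc::SLLI, 3) == Opc::SLLIW);
  assert(!wVariant(Opc::SLLI, 40)); // shift amount does not fit SLLIW
  assert(wVariant(Opc::LWU) == Opc::LW);
}
```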
diff --git a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
index fbf64f2..ae8baa3 100644
--- a/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVTargetMachine.cpp
@@ -55,9 +55,9 @@ static std::string computeDataLayout(const Triple &TT) {
// mean anything.
if (Arch == Triple::spirv32)
return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024";
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1";
return "e-i64:64-v16:16-v24:32-v32:32-v48:64-"
- "v96:128-v192:256-v256:256-v512:512-v1024:1024";
+ "v96:128-v192:256-v256:256-v512:512-v1024:1024-G1";
}
static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
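The only functional change here is the trailing -G1 token, which makes address space 1 the default for global variables in the SPIR-V data layout. A minimal check against LLVM's own DataLayout parser, assuming an LLVM build to compile against (the layout string is abbreviated for illustration):

```cpp
#include "llvm/IR/DataLayout.h"
#include <cassert>

int main() {
  // "-G1": globals default to address space 1, as in the layouts above.
  llvm::DataLayout DL("e-p:32:32-i64:64-G1");
  assert(DL.getDefaultGlobalsAddressSpace() == 1);
  return 0;
}
```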
diff --git a/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp b/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp
index e7c9e60..9e85424 100644
--- a/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp
+++ b/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.cpp
@@ -13,10 +13,13 @@
#include "X86RegisterBankInfo.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
+#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
+#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/IntrinsicsX86.h"
#define GET_TARGET_REGBANK_IMPL
#include "X86GenRegisterBank.inc"
@@ -68,6 +71,98 @@ X86RegisterBankInfo::getRegBankFromRegClass(const TargetRegisterClass &RC,
llvm_unreachable("Unsupported register kind yet.");
}
+// \returns true if a given intrinsic only uses and defines FPRs.
+static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
+ const MachineInstr &MI) {
+ // TODO: Add more intrinsics.
+ switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
+ default:
+ return false;
+ // SSE1
+ case Intrinsic::x86_sse_rcp_ss:
+ case Intrinsic::x86_sse_rcp_ps:
+ case Intrinsic::x86_sse_rsqrt_ss:
+ case Intrinsic::x86_sse_rsqrt_ps:
+ case Intrinsic::x86_sse_min_ss:
+ case Intrinsic::x86_sse_min_ps:
+ case Intrinsic::x86_sse_max_ss:
+ case Intrinsic::x86_sse_max_ps:
+ return true;
+ }
+ return false;
+}
+
+bool X86RegisterBankInfo::hasFPConstraints(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI,
+ unsigned Depth) const {
+ unsigned Op = MI.getOpcode();
+ if (Op == TargetOpcode::G_INTRINSIC && isFPIntrinsic(MRI, MI))
+ return true;
+
+ // Do we have an explicit floating point instruction?
+ if (isPreISelGenericFloatingPointOpcode(Op))
+ return true;
+
+ // No. Check if we have a copy-like instruction. If we do, then we could
+ // still be fed by floating point instructions.
+ if (Op != TargetOpcode::COPY && !MI.isPHI() &&
+ !isPreISelGenericOptimizationHint(Op))
+ return false;
+
+ // Check if we already know the register bank.
+ auto *RB = getRegBank(MI.getOperand(0).getReg(), MRI, TRI);
+ if (RB == &getRegBank(X86::PSRRegBankID))
+ return true;
+ if (RB == &getRegBank(X86::GPRRegBankID))
+ return false;
+
+ // We don't know anything.
+ //
+ // If we have a phi, we may be able to infer that it will be assigned a fp
+ // type based off of its inputs.
+ if (!MI.isPHI() || Depth > MaxFPRSearchDepth)
+ return false;
+
+ return any_of(MI.explicit_uses(), [&](const MachineOperand &Op) {
+ return Op.isReg() &&
+ onlyDefinesFP(*MRI.getVRegDef(Op.getReg()), MRI, TRI, Depth + 1);
+ });
+}
+
+bool X86RegisterBankInfo::onlyUsesFP(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI,
+ unsigned Depth) const {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_FPTOSI:
+ case TargetOpcode::G_FPTOUI:
+ case TargetOpcode::G_FCMP:
+ case TargetOpcode::G_LROUND:
+ case TargetOpcode::G_LLROUND:
+ case TargetOpcode::G_INTRINSIC_TRUNC:
+ case TargetOpcode::G_INTRINSIC_ROUND:
+ return true;
+ default:
+ break;
+ }
+ return hasFPConstraints(MI, MRI, TRI, Depth);
+}
+
+bool X86RegisterBankInfo::onlyDefinesFP(const MachineInstr &MI,
+ const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI,
+ unsigned Depth) const {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_SITOFP:
+ case TargetOpcode::G_UITOFP:
+ return true;
+ default:
+ break;
+ }
+ return hasFPConstraints(MI, MRI, TRI, Depth);
+}
+
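hasFPConstraints, onlyUsesFP, and onlyDefinesFP follow a pattern other GlobalISel targets already use: FP-ness propagates through copies and phis, but only to a bounded depth so cyclic phi webs stay cheap and terminate. A toy model of the depth-capped walk, using hand-rolled node types rather than MachineInstr:

```cpp
#include <cassert>
#include <vector>

struct Node {
  bool IsFPOp = false;              // explicit FP instruction
  bool IsPhi = false;
  std::vector<const Node *> Inputs; // defs feeding a phi
};

constexpr unsigned MaxDepth = 2;

// True if N provably produces an FP value; gives up (returns false, i.e.
// "assume GPR") once the recursion budget is spent.
bool definesFP(const Node &N, unsigned Depth = 0) {
  if (N.IsFPOp)
    return true;
  if (!N.IsPhi || Depth > MaxDepth)
    return false;
  for (const Node *In : N.Inputs)
    if (definesFP(*In, Depth + 1))
      return true;
  return false;
}

int main() {
  Node FAdd{true, false, {}};
  Node Phi{false, true, {&FAdd}}; // phi fed by an FP op is inferred FP
  assert(definesFP(Phi));
}
```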
X86GenRegisterBankInfo::PartialMappingIdx
X86GenRegisterBankInfo::getPartialMappingIdx(const MachineInstr &MI,
const LLT &Ty, bool isFP) {
@@ -180,11 +275,13 @@ X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
const MachineFunction &MF = *MI.getParent()->getParent();
+ const TargetSubtargetInfo &STI = MF.getSubtarget();
+ const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
const MachineRegisterInfo &MRI = MF.getRegInfo();
unsigned Opc = MI.getOpcode();
- // Try the default logic for non-generic instructions that are either copies
- // or already have some operands assigned to banks.
+ // Try the default logic for non-generic instructions that are either
+ // copies or already have some operands assigned to banks.
if (!isPreISelGenericOpcode(Opc) || Opc == TargetOpcode::G_PHI) {
const InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
@@ -221,13 +318,14 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case TargetOpcode::G_FPEXT:
case TargetOpcode::G_FPTRUNC:
case TargetOpcode::G_FCONSTANT:
- // Instruction having only floating-point operands (all scalars in VECRReg)
+ // Instruction having only floating-point operands (all scalars in
+ // VECRReg)
getInstrPartialMappingIdxs(MI, MRI, /* isFP= */ true, OpRegBankIdx);
break;
case TargetOpcode::G_SITOFP:
case TargetOpcode::G_FPTOSI: {
- // Some of the floating-point instructions have mixed GPR and FP operands:
- // fine-tune the computed mapping.
+ // Some of the floating-point instructions have mixed GPR and FP
+ // operands: fine-tune the computed mapping.
auto &Op0 = MI.getOperand(0);
auto &Op1 = MI.getOperand(1);
const LLT Ty0 = MRI.getType(Op0.getReg());
@@ -271,9 +369,36 @@ X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
getInstrPartialMappingIdxs(MI, MRI, /* isFP= */ isFPTrunc || isFPAnyExt,
OpRegBankIdx);
- } break;
+ break;
+ }
+ case TargetOpcode::G_LOAD: {
+    // Check if the load feeds FP instructions.
+    // In that case, we want the default mapping to be on FPR
+    // instead of blindly mapping every scalar to GPR.
+ bool IsFP = any_of(MRI.use_nodbg_instructions(cast<GLoad>(MI).getDstReg()),
+ [&](const MachineInstr &UseMI) {
+ // If we have at least one direct use in a FP
+                           // If we have at least one direct use in an FP
+                           // instruction, assume this was a floating-point load
+ // bitcast before reaching that instruction.
+ return onlyUsesFP(UseMI, MRI, TRI);
+ });
+ getInstrPartialMappingIdxs(MI, MRI, IsFP, OpRegBankIdx);
+ break;
+ }
+ case TargetOpcode::G_STORE: {
+ // Check if that store is fed by fp instructions.
+ Register VReg = cast<GStore>(MI).getValueReg();
+ if (!VReg)
+ break;
+ MachineInstr *DefMI = MRI.getVRegDef(VReg);
+ bool IsFP = onlyDefinesFP(*DefMI, MRI, TRI);
+ getInstrPartialMappingIdxs(MI, MRI, IsFP, OpRegBankIdx);
+ break;
+ }
default:
- // Track the bank of each register, use NotFP mapping (all scalars in GPRs)
+ // Track the bank of each register, use NotFP mapping (all scalars in
+ // GPRs)
getInstrPartialMappingIdxs(MI, MRI, /* isFP= */ false, OpRegBankIdx);
break;
}
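For G_LOAD and G_STORE the bank is inferred from neighbours rather than from the memory operation itself: a load goes on the FP bank if any direct user consumes only FP values, and a store follows the instruction defining the stored value. A condensed model of that heuristic, again with stand-in types of my own:

```cpp
#include <cassert>
#include <vector>

enum class Bank { GPR, FPR };

struct Inst { bool FPOnly; }; // stand-in for onlyUsesFP / onlyDefinesFP

// Load: FPR if any direct user is FP-only; otherwise default to GPR.
Bank inferLoadBank(const std::vector<Inst> &Users) {
  for (const Inst &U : Users)
    if (U.FPOnly)
      return Bank::FPR;
  return Bank::GPR;
}

// Store: follow the single def of the stored value.
Bank inferStoreBank(const Inst &Def) {
  return Def.FPOnly ? Bank::FPR : Bank::GPR;
}

int main() {
  assert(inferLoadBank({{false}, {true}}) == Bank::FPR);
  assert(inferLoadBank({{false}}) == Bank::GPR);
  assert(inferStoreBank({true}) == Bank::FPR);
}
```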
diff --git a/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.h b/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.h
index 989c595..8f38e71 100644
--- a/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.h
+++ b/llvm/lib/Target/X86/GISel/X86RegisterBankInfo.h
@@ -62,6 +62,22 @@ private:
const SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx,
SmallVectorImpl<const ValueMapping *> &OpdsMapping);
+ // Maximum recursion depth for hasFPConstraints.
+ const unsigned MaxFPRSearchDepth = 2;
+
+ /// \returns true if \p MI only uses and defines FPRs.
+ bool hasFPConstraints(const MachineInstr &MI, const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI,
+ unsigned Depth = 0) const;
+
+ /// \returns true if \p MI only uses FPRs.
+ bool onlyUsesFP(const MachineInstr &MI, const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI, unsigned Depth = 0) const;
+
+ /// \returns true if \p MI only defines FPRs.
+ bool onlyDefinesFP(const MachineInstr &MI, const MachineRegisterInfo &MRI,
+ const TargetRegisterInfo &TRI, unsigned Depth = 0) const;
+
public:
X86RegisterBankInfo(const TargetRegisterInfo &TRI);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index bae8579..ba5db85 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1774,6 +1774,13 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
if (Instruction *I = moveAddAfterMinMax(II, Builder))
return I;
+ // minmax (X & NegPow2C, Y & NegPow2C) --> minmax(X, Y) & NegPow2C
+ const APInt *RHSC;
+ if (match(I0, m_OneUse(m_And(m_Value(X), m_NegatedPower2(RHSC)))) &&
+ match(I1, m_OneUse(m_And(m_Value(Y), m_SpecificInt(*RHSC)))))
+ return BinaryOperator::CreateAnd(Builder.CreateBinaryIntrinsic(IID, X, Y),
+ ConstantInt::get(II->getType(), *RHSC));
+
// smax(X, -X) --> abs(X)
// smin(X, -X) --> -abs(X)
// umax(X, -X) --> -abs(X)
@@ -1815,7 +1822,6 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
return NewMinMax;
// Try to fold minmax with constant RHS based on range information
- const APInt *RHSC;
if (match(I1, m_APIntAllowUndef(RHSC))) {
ICmpInst::Predicate Pred =
ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
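The new fold is sound because ANDing with a negated power of two (here -8, i.e. ~0 << 3) rounds its operand down to a multiple of 8, which is a monotone function, and monotone functions commute with min/max. A quick exhaustive C++ check over the i8 range, shown for smax (the other three min/max intrinsics behave the same way):

```cpp
#include <algorithm>
#include <cassert>

int main() {
  for (int x = -128; x <= 127; ++x)
    for (int y = -128; y <= 127; ++y) {
      // smax(x & -8, y & -8) == smax(x, y) & -8
      int lhs = std::max(x & -8, y & -8);
      int rhs = std::max(x, y) & -8;
      assert(lhs == rhs);
    }
  return 0;
}
```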
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 380bac9..baec51a 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1019,12 +1019,14 @@ CanRedirectPredsOfEmptyBBToSucc(BasicBlock *BB, BasicBlock *Succ,
const SmallPtrSetImpl<BasicBlock *> &SuccPreds,
BasicBlock *&CommonPred) {
- // There must be phis in BB, otherwise BB will be merged into Succ directly
- if (BB->phis().empty() || Succ->phis().empty())
+ // When Succ has no phis, BB may be merged into Succ directly. We don't need
+ // to redirect the predecessors of BB in this case.
+ if (Succ->phis().empty())
return false;
- // BB must have predecessors not shared that can be redirected to Succ
- if (!BB->hasNPredecessorsOrMore(2))
+  // BB must have multiple distinct predecessors, so that at least one
+  // predecessor other than the common predecessor can be redirected to Succ.
+ if (BB->getUniquePredecessor() || pred_empty(BB))
return false;
// Get single common predecessors of both BB and Succ
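The two early-outs can be read as: Succ must have phis worth updating, and BB must have at least two distinct predecessors so that something other than the common predecessor can actually be redirected. A distilled model of just that predicate, in plain C++ with my own simplification of the CFG types (getUniquePredecessor treats a block reaching BB over several edges as still unique, which the set-based check below mirrors):

```cpp
#include <cassert>
#include <set>
#include <vector>

bool canConsiderRedirect(bool SuccHasPhis, const std::vector<int> &BBPreds) {
  if (!SuccHasPhis)
    return false; // nothing to update in Succ
  // Need at least two distinct predecessors of BB.
  std::set<int> Distinct(BBPreds.begin(), BBPreds.end());
  return Distinct.size() >= 2;
}

int main() {
  assert(!canConsiderRedirect(true, {7}));    // unique predecessor
  assert(!canConsiderRedirect(true, {7, 7})); // same block over two edges
  assert(canConsiderRedirect(true, {7, 8}));
}
```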
@@ -3627,10 +3629,12 @@ DIExpression *llvm::getExpressionForConstant(DIBuilder &DIB, const Constant &C,
return createIntegerExpression(C);
auto *FP = dyn_cast<ConstantFP>(&C);
- if (FP && (Ty.isFloatTy() || Ty.isDoubleTy())) {
+ if (FP && Ty.isFloatingPointTy() && Ty.getScalarSizeInBits() <= 64) {
const APFloat &APF = FP->getValueAPF();
- return DIB.createConstantValueExpression(
- APF.bitcastToAPInt().getZExtValue());
+ APInt const &API = APF.bitcastToAPInt();
+ if (auto Temp = API.getZExtValue())
+ return DIB.createConstantValueExpression(static_cast<uint64_t>(Temp));
+ return DIB.createConstantValueExpression(*API.getRawData());
}
if (!Ty.isPointerTy())
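The relaxed guard accepts any floating-point type of at most 64 bits (half, bfloat, float, double), since such a value's bit pattern zero-extends losslessly into the 64-bit payload that createConstantValueExpression takes. A C++20 illustration of the underlying bitcast, with std::bit_cast standing in for APFloat::bitcastToAPInt:

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  // 1.5f -> 0x3FC00000, zero-extended into the 64-bit payload.
  uint64_t Payload = std::bit_cast<uint32_t>(1.5f);
  assert(Payload == 0x3FC00000u);
  // 1.5 as a double already fills all 64 bits.
  uint64_t DPayload = std::bit_cast<uint64_t>(1.5);
  assert(DPayload == 0x3FF8000000000000ull);
}
```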
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index c63b500..d0bcdce 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -15155,8 +15155,8 @@ bool SLPVectorizerPass::vectorizeStores(ArrayRef<StoreInst *> Stores,
Type *ValueTy = StoreTy;
if (auto *Trunc = dyn_cast<TruncInst>(Store->getValueOperand()))
ValueTy = Trunc->getSrcTy();
- unsigned MinVF = TTI->getStoreMinimumVF(
- R.getMinVF(DL->getTypeSizeInBits(StoreTy)), StoreTy, ValueTy);
+ unsigned MinVF = PowerOf2Ceil(TTI->getStoreMinimumVF(
+ R.getMinVF(DL->getTypeStoreSizeInBits(StoreTy)), StoreTy, ValueTy));
if (MaxVF < MinVF) {
LLVM_DEBUG(dbgs() << "SLP: Vectorization infeasible as MaxVF (" << MaxVF
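Wrapping the result in PowerOf2Ceil keeps MinVF a power of two before it is compared against MaxVF, presumably because the vectorization factors the SLP vectorizer actually tries are powers of two. In freestanding C++20 the same rounding is std::bit_ceil:

```cpp
#include <bit>
#include <cassert>

int main() {
  // LLVM's PowerOf2Ceil(x) behaves like std::bit_ceil for nonzero x.
  assert(std::bit_ceil(3u) == 4u);
  assert(std::bit_ceil(4u) == 4u);
  assert(std::bit_ceil(5u) == 8u);
}
```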
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll b/llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll
index bd576d0..8c6e01d 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/vastart.ll
@@ -3,7 +3,7 @@
declare void @llvm.va_start(ptr)
-define void @test_va_start(ptr %list) {
+define void @test_va_start(ptr %list, ...) {
; CHECK-LABEL: name: test_va_start
; CHECK: [[LIST:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-IOS: G_VASTART [[LIST]](p0) :: (store (s64) into %ir.list, align 1)
diff --git a/llvm/test/CodeGen/AArch64/and-sink.ll b/llvm/test/CodeGen/AArch64/and-sink.ll
index f298a55..a57e9d5 100644
--- a/llvm/test/CodeGen/AArch64/and-sink.ll
+++ b/llvm/test/CodeGen/AArch64/and-sink.ll
@@ -11,15 +11,14 @@
define dso_local i32 @and_sink1(i32 %a, i1 %c) {
; CHECK-LABEL: and_sink1:
; CHECK: // %bb.0:
-; CHECK-NEXT: tbz w1, #0, .LBB0_3
+; CHECK-NEXT: tbz w1, #0, .LBB0_2
; CHECK-NEXT: // %bb.1: // %bb0
+; CHECK-NEXT: tst w0, #0x4
; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: str wzr, [x8, :lo12:A]
-; CHECK-NEXT: tbnz w0, #2, .LBB0_3
-; CHECK-NEXT: // %bb.2:
-; CHECK-NEXT: mov w0, #1 // =0x1
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB0_3: // %bb2
+; CHECK-NEXT: .LBB0_2:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
index 6449c3e1..dde3e81 100644
--- a/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
+++ b/llvm/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
@@ -13,10 +13,10 @@ define i32 @combine_gt_ge_10() #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:a
; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
-; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmp w8, #10
+; CHECK-NEXT: ldr w9, [x8]
; CHECK-NEXT: adrp x8, :got:b
; CHECK-NEXT: ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT: cmp w9, #10
; CHECK-NEXT: b.le .LBB0_3
; CHECK-NEXT: // %bb.1: // %land.lhs.true
; CHECK-NEXT: adrp x9, :got:c
@@ -29,18 +29,17 @@ define i32 @combine_gt_ge_10() #0 {
; CHECK-NEXT: mov w0, #1 // =0x1
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB0_3: // %lor.lhs.false
-; CHECK-NEXT: b.lt .LBB0_6
+; CHECK-NEXT: cmp w9, #10
+; CHECK-NEXT: b.lt .LBB0_5
; CHECK-NEXT: .LBB0_4: // %land.lhs.true3
; CHECK-NEXT: adrp x9, :got:d
; CHECK-NEXT: ldr x9, [x9, :got_lo12:d]
; CHECK-NEXT: ldr w8, [x8]
; CHECK-NEXT: ldr w9, [x9]
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB0_6
-; CHECK-NEXT: // %bb.5:
-; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB0_6: // %if.end
+; CHECK-NEXT: .LBB0_5:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
entry:
@@ -145,10 +144,10 @@ define i32 @combine_lt_ge_5() #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: adrp x8, :got:a
; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
-; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmp w8, #5
+; CHECK-NEXT: ldr w9, [x8]
; CHECK-NEXT: adrp x8, :got:b
; CHECK-NEXT: ldr x8, [x8, :got_lo12:b]
+; CHECK-NEXT: cmp w9, #5
; CHECK-NEXT: b.ge .LBB2_3
; CHECK-NEXT: // %bb.1: // %land.lhs.true
; CHECK-NEXT: adrp x9, :got:c
@@ -161,18 +160,17 @@ define i32 @combine_lt_ge_5() #0 {
; CHECK-NEXT: mov w0, #1 // =0x1
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_3: // %lor.lhs.false
-; CHECK-NEXT: b.gt .LBB2_6
+; CHECK-NEXT: cmp w9, #5
+; CHECK-NEXT: b.gt .LBB2_5
; CHECK-NEXT: .LBB2_4: // %land.lhs.true3
; CHECK-NEXT: adrp x9, :got:d
; CHECK-NEXT: ldr x9, [x9, :got_lo12:d]
; CHECK-NEXT: ldr w8, [x8]
; CHECK-NEXT: ldr w9, [x9]
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB2_6
-; CHECK-NEXT: // %bb.5:
-; CHECK-NEXT: mov w0, #1 // =0x1
+; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB2_6: // %if.end
+; CHECK-NEXT: .LBB2_5:
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: ret
entry:
@@ -499,24 +497,17 @@ define i32 @do_nothing_if_resultant_opcodes_would_differ() #0 {
; CHECK-NEXT: // %bb.3: // %while.cond.while.end_crit_edge
; CHECK-NEXT: ldr w8, [x19]
; CHECK-NEXT: .LBB7_4: // %while.end
-; CHECK-NEXT: cmp w8, #1
-; CHECK-NEXT: b.gt .LBB7_7
-; CHECK-NEXT: // %bb.5: // %land.lhs.true
-; CHECK-NEXT: adrp x8, :got:b
-; CHECK-NEXT: adrp x9, :got:d
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:b]
-; CHECK-NEXT: ldr x9, [x9, :got_lo12:d]
-; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: ldr w9, [x9]
-; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB7_7
-; CHECK-NEXT: // %bb.6:
-; CHECK-NEXT: mov w0, #123 // =0x7b
-; CHECK-NEXT: b .LBB7_8
-; CHECK-NEXT: .LBB7_7: // %if.end
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: .LBB7_8: // %return
+; CHECK-NEXT: adrp x9, :got:b
+; CHECK-NEXT: adrp x10, :got:d
+; CHECK-NEXT: ldr x9, [x9, :got_lo12:b]
+; CHECK-NEXT: ldr x10, [x10, :got_lo12:d]
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldr w9, [x9]
+; CHECK-NEXT: ldr w10, [x10]
+; CHECK-NEXT: cmp w9, w10
+; CHECK-NEXT: ccmp w8, #2, #0, eq
+; CHECK-NEXT: mov w8, #123 // =0x7b
+; CHECK-NEXT: csel w0, w8, wzr, lt
; CHECK-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
; CHECK-NEXT: .cfi_def_cfa_offset 0
; CHECK-NEXT: .cfi_restore w19
@@ -564,52 +555,42 @@ return: ; preds = %if.end, %land.lhs.t
define i32 @do_nothing_if_compares_can_not_be_adjusted_to_each_other() #0 {
; CHECK-LABEL: do_nothing_if_compares_can_not_be_adjusted_to_each_other:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: .cfi_remember_state
; CHECK-NEXT: adrp x8, :got:a
; CHECK-NEXT: ldr x8, [x8, :got_lo12:a]
; CHECK-NEXT: ldr w8, [x8]
; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: b.gt .LBB8_3
+; CHECK-NEXT: b.gt .LBB8_4
; CHECK-NEXT: // %bb.1: // %while.body.preheader
+; CHECK-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: sub w19, w8, #1
; CHECK-NEXT: .LBB8_2: // %while.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: bl do_something
; CHECK-NEXT: adds w19, w19, #1
; CHECK-NEXT: b.mi .LBB8_2
-; CHECK-NEXT: .LBB8_3: // %while.end
-; CHECK-NEXT: adrp x8, :got:c
-; CHECK-NEXT: ldr x8, [x8, :got_lo12:c]
-; CHECK-NEXT: ldr w8, [x8]
-; CHECK-NEXT: cmn w8, #2
-; CHECK-NEXT: b.lt .LBB8_6
-; CHECK-NEXT: // %bb.4: // %land.lhs.true
+; CHECK-NEXT: // %bb.3:
+; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: .cfi_restore w19
+; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: .LBB8_4: // %while.end
; CHECK-NEXT: adrp x8, :got:b
; CHECK-NEXT: adrp x9, :got:d
+; CHECK-NEXT: adrp x10, :got:c
; CHECK-NEXT: ldr x8, [x8, :got_lo12:b]
; CHECK-NEXT: ldr x9, [x9, :got_lo12:d]
+; CHECK-NEXT: ldr x10, [x10, :got_lo12:c]
; CHECK-NEXT: ldr w8, [x8]
; CHECK-NEXT: ldr w9, [x9]
+; CHECK-NEXT: ldr w10, [x10]
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB8_6
-; CHECK-NEXT: // %bb.5:
-; CHECK-NEXT: mov w0, #123 // =0x7b
-; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w19
-; CHECK-NEXT: .cfi_restore w30
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB8_6: // %if.end
-; CHECK-NEXT: .cfi_restore_state
-; CHECK-NEXT: mov w0, wzr
-; CHECK-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: .cfi_restore w19
-; CHECK-NEXT: .cfi_restore w30
+; CHECK-NEXT: mov w8, #-3 // =0xfffffffd
+; CHECK-NEXT: ccmp w10, w8, #4, eq
+; CHECK-NEXT: mov w8, #123 // =0x7b
+; CHECK-NEXT: csel w0, w8, wzr, gt
; CHECK-NEXT: ret
entry:
%0 = load i32, ptr @a, align 4
@@ -782,12 +763,14 @@ define i32 @combine_gt_ge_sel(i64 %v, ptr %p) #0 {
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: csel x9, x0, xzr, gt
; CHECK-NEXT: str x9, [x1]
-; CHECK-NEXT: b.le .LBB11_2
+; CHECK-NEXT: b.le .LBB11_3
; CHECK-NEXT: // %bb.1: // %lor.lhs.false
; CHECK-NEXT: cmp w8, #2
-; CHECK-NEXT: b.ge .LBB11_4
-; CHECK-NEXT: b .LBB11_6
-; CHECK-NEXT: .LBB11_2: // %land.lhs.true
+; CHECK-NEXT: b.ge .LBB11_5
+; CHECK-NEXT: // %bb.2:
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB11_3: // %land.lhs.true
; CHECK-NEXT: adrp x8, :got:b
; CHECK-NEXT: adrp x9, :got:c
; CHECK-NEXT: ldr x8, [x8, :got_lo12:b]
@@ -795,11 +778,11 @@ define i32 @combine_gt_ge_sel(i64 %v, ptr %p) #0 {
; CHECK-NEXT: ldr w8, [x8]
; CHECK-NEXT: ldr w9, [x9]
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB11_4
-; CHECK-NEXT: // %bb.3:
+; CHECK-NEXT: b.ne .LBB11_5
+; CHECK-NEXT: // %bb.4:
; CHECK-NEXT: mov w0, #1 // =0x1
; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB11_4: // %land.lhs.true3
+; CHECK-NEXT: .LBB11_5: // %land.lhs.true3
; CHECK-NEXT: adrp x8, :got:b
; CHECK-NEXT: adrp x9, :got:d
; CHECK-NEXT: ldr x8, [x8, :got_lo12:b]
@@ -807,12 +790,7 @@ define i32 @combine_gt_ge_sel(i64 %v, ptr %p) #0 {
; CHECK-NEXT: ldr w8, [x8]
; CHECK-NEXT: ldr w9, [x9]
; CHECK-NEXT: cmp w8, w9
-; CHECK-NEXT: b.ne .LBB11_6
-; CHECK-NEXT: // %bb.5:
-; CHECK-NEXT: mov w0, #1 // =0x1
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB11_6: // %if.end
-; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
entry:
%0 = load i32, ptr @a, align 4
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index bf4302c..4c9c34d 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -38342,12 +38342,11 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX10-NEXT: v_and_b32_e32 v2, 1, v2
; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
; GFX10-NEXT: v_and_b32_e32 v6, 1, v6
-; GFX10-NEXT: v_writelane_b32 v40, s31, 1
; GFX10-NEXT: v_and_b32_e32 v8, 1, v8
; GFX10-NEXT: v_and_b32_e32 v10, 1, v10
+; GFX10-NEXT: v_writelane_b32 v40, s31, 1
; GFX10-NEXT: v_and_b32_e32 v1, 1, v1
; GFX10-NEXT: v_and_b32_e32 v3, 1, v3
-; GFX10-NEXT: v_writelane_b32 v40, s34, 2
; GFX10-NEXT: v_and_b32_e32 v5, 1, v5
; GFX10-NEXT: v_and_b32_e32 v7, 1, v7
; GFX10-NEXT: v_and_b32_e32 v9, 1, v9
@@ -38366,7 +38365,7 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX10-NEXT: v_cmp_eq_u32_e64 s17, 1, v4
; GFX10-NEXT: v_cmp_eq_u32_e64 s18, 1, v2
; GFX10-NEXT: v_cmp_eq_u32_e64 s19, 1, v0
-; GFX10-NEXT: v_writelane_b32 v40, s35, 3
+; GFX10-NEXT: v_writelane_b32 v40, s34, 2
; GFX10-NEXT: v_cmp_eq_u32_e64 s20, 1, v27
; GFX10-NEXT: v_cmp_eq_u32_e64 s21, 1, v25
; GFX10-NEXT: v_cmp_eq_u32_e64 s22, 1, v23
@@ -38377,10 +38376,10 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX10-NEXT: v_cmp_eq_u32_e64 s27, 1, v13
; GFX10-NEXT: v_cmp_eq_u32_e64 s28, 1, v11
; GFX10-NEXT: v_cmp_eq_u32_e64 s29, 1, v7
-; GFX10-NEXT: v_cmp_eq_u32_e64 s30, 1, v3
-; GFX10-NEXT: v_cmp_eq_u32_e64 s31, 1, v1
-; GFX10-NEXT: v_cmp_eq_u32_e64 s34, 1, v5
-; GFX10-NEXT: v_cmp_eq_u32_e64 s35, 1, v9
+; GFX10-NEXT: v_cmp_eq_u32_e64 vcc_hi, 1, v3
+; GFX10-NEXT: v_cmp_eq_u32_e64 s30, 1, v1
+; GFX10-NEXT: v_cmp_eq_u32_e64 s31, 1, v5
+; GFX10-NEXT: v_cmp_eq_u32_e64 s34, 1, v9
; GFX10-NEXT: s_waitcnt vmcnt(32)
; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v31
; GFX10-NEXT: s_waitcnt vmcnt(31)
@@ -38460,10 +38459,10 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX10-NEXT: v_cndmask_b32_e64 v6, v29, v39, s27
; GFX10-NEXT: v_cndmask_b32_e64 v5, v28, v26, s28
; GFX10-NEXT: v_cndmask_b32_e64 v20, v51, v20, s29
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v14, v12, s31
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v55, v16, s30
-; GFX10-NEXT: v_cndmask_b32_e64 v2, v53, v18, s34
-; GFX10-NEXT: v_cndmask_b32_e64 v12, v24, v22, s35
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v14, v12, s30
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v55, v16, vcc_hi
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v53, v18, s31
+; GFX10-NEXT: v_cndmask_b32_e64 v12, v24, v22, s34
; GFX10-NEXT: v_cndmask_b32_e64 v16, v4, v3, s4
; GFX10-NEXT: v_perm_b32 v0, v0, v64, 0x5040100
; GFX10-NEXT: v_perm_b32 v1, v1, v54, 0x5040100
@@ -38481,7 +38480,6 @@ define <32 x bfloat> @v_vselect_v32bf16(<32 x i1> %cond, <32 x bfloat> %a, <32 x
; GFX10-NEXT: v_perm_b32 v13, v66, v13, 0x5040100
; GFX10-NEXT: v_perm_b32 v14, v65, v17, 0x5040100
; GFX10-NEXT: v_perm_b32 v15, v16, v15, 0x5040100
-; GFX10-NEXT: v_readlane_b32 s35, v40, 3
; GFX10-NEXT: v_readlane_b32 s34, v40, 2
; GFX10-NEXT: v_readlane_b32 s31, v40, 1
; GFX10-NEXT: v_readlane_b32 s30, v40, 0
diff --git a/llvm/test/CodeGen/AMDGPU/build_vector.ll b/llvm/test/CodeGen/AMDGPU/build_vector.ll
index 37412ac..9975513 100644
--- a/llvm/test/CodeGen/AMDGPU/build_vector.ll
+++ b/llvm/test/CodeGen/AMDGPU/build_vector.ll
@@ -3,6 +3,7 @@
; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefixes=GFX8,GFX678,ALL
; RUN: llc < %s -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -verify-machineinstrs | FileCheck %s --check-prefixes=GFX10,GFX1011,ALL
; RUN: llc < %s -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -amdgpu-enable-vopd=0 -verify-machineinstrs | FileCheck %s --check-prefixes=GFX11,GFX1011,ALL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx940 | FileCheck %s --check-prefixes=GFX940,ALL
; ALL-LABEL: {{^}}build_vector2:
; R600: MOV
@@ -96,3 +97,99 @@ define amdgpu_kernel void @build_vector_v2i16_trunc (ptr addrspace(1) %out, i32
store <2 x i16> %ins.1, ptr addrspace(1) %out
ret void
}
+
+; R600-LABEL: build_v2i32_from_v4i16_shuffle:
+; R600: ; %bb.0: ; %entry
+; R600-NEXT: ALU 0, @10, KC0[], KC1[]
+; R600-NEXT: TEX 1 @6
+; R600-NEXT: ALU 4, @11, KC0[CB0:0-32], KC1[]
+; R600-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; R600-NEXT: CF_END
+; R600-NEXT: PAD
+; R600-NEXT: Fetch clause starting at 6:
+; R600-NEXT: VTX_READ_16 T1.X, T0.X, 48, #3
+; R600-NEXT: VTX_READ_16 T0.X, T0.X, 44, #3
+; R600-NEXT: ALU clause starting at 10:
+; R600-NEXT: MOV * T0.X, 0.0,
+; R600-NEXT: ALU clause starting at 11:
+; R600-NEXT: LSHL * T0.Y, T1.X, literal.x,
+; R600-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; R600-NEXT: LSHL T0.X, T0.X, literal.x,
+; R600-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; R600-NEXT: 16(2.242078e-44), 2(2.802597e-45)
+;
+; GFX6-LABEL: build_v2i32_from_v4i16_shuffle:
+; GFX6: ; %bb.0: ; %entry
+; GFX6-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GFX6-NEXT: s_mov_b32 s7, 0xf000
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_mov_b32 s6, -1
+; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_endpgm
+;
+; GFX8-LABEL: build_v2i32_from_v4i16_shuffle:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, s0
+; GFX8-NEXT: s_mov_b32 s5, s1
+; GFX8-NEXT: s_lshl_b32 s0, s3, 16
+; GFX8-NEXT: s_lshl_b32 s1, s2, 16
+; GFX8-NEXT: v_mov_b32_e32 v0, s1
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
+;
+; GFX10-LABEL: build_v2i32_from_v4i16_shuffle:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
+; GFX10-NEXT: v_mov_b32_e32 v2, 0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_lshl_b32 s2, s2, 16
+; GFX10-NEXT: s_lshl_b32 s3, s3, 16
+; GFX10-NEXT: v_mov_b32_e32 v0, s2
+; GFX10-NEXT: v_mov_b32_e32 v1, s3
+; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: build_v2i32_from_v4i16_shuffle:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: v_mov_b32_e32 v2, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: v_mov_b32_e32 v0, s2
+; GFX11-NEXT: v_mov_b32_e32 v1, s3
+; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+;
+; GFX940-LABEL: build_v2i32_from_v4i16_shuffle:
+; GFX940: ; %bb.0: ; %entry
+; GFX940-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX940-NEXT: v_mov_b32_e32 v2, 0
+; GFX940-NEXT: s_waitcnt lgkmcnt(0)
+; GFX940-NEXT: s_lshl_b32 s3, s3, 16
+; GFX940-NEXT: s_lshl_b32 s2, s2, 16
+; GFX940-NEXT: v_mov_b32_e32 v0, s2
+; GFX940-NEXT: v_mov_b32_e32 v1, s3
+; GFX940-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1] sc0 sc1
+; GFX940-NEXT: s_endpgm
+define amdgpu_kernel void @build_v2i32_from_v4i16_shuffle(ptr addrspace(1) %out, <4 x i16> %in) {
+entry:
+ %shuf = shufflevector <4 x i16> %in, <4 x i16> zeroinitializer, <2 x i32> <i32 0, i32 2>
+ %zextended = zext <2 x i16> %shuf to <2 x i32>
+ %shifted = shl <2 x i32> %zextended, <i32 16, i32 16>
+ store <2 x i32> %shifted, ptr addrspace(1) %out
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll
index ec3c08e..da64c37 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.is.fpclass.f16.ll
@@ -1259,17 +1259,17 @@ define <4 x i1> @isnan_v4f16(<4 x half> %x) nounwind {
; GFX10SELDAG-LABEL: isnan_v4f16:
; GFX10SELDAG: ; %bb.0:
; GFX10SELDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10SELDAG-NEXT: v_mov_b32_e32 v2, 3
-; GFX10SELDAG-NEXT: v_cmp_class_f16_e64 s5, v0, 3
-; GFX10SELDAG-NEXT: v_cmp_class_f16_sdwa s4, v1, v2 src0_sel:WORD_1 src1_sel:DWORD
-; GFX10SELDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s5
-; GFX10SELDAG-NEXT: v_cmp_class_f16_sdwa s5, v0, v2 src0_sel:WORD_1 src1_sel:DWORD
+; GFX10SELDAG-NEXT: v_cmp_class_f16_e64 s4, v0, 3
+; GFX10SELDAG-NEXT: v_mov_b32_e32 v3, 3
+; GFX10SELDAG-NEXT: v_cndmask_b32_e64 v5, 0, 1, s4
+; GFX10SELDAG-NEXT: v_cmp_class_f16_e64 s4, v1, 3
+; GFX10SELDAG-NEXT: v_cndmask_b32_e64 v2, 0, 1, s4
+; GFX10SELDAG-NEXT: v_cmp_class_f16_sdwa s4, v0, v3 src0_sel:WORD_1 src1_sel:DWORD
+; GFX10SELDAG-NEXT: v_mov_b32_e32 v0, v5
+; GFX10SELDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s4
+; GFX10SELDAG-NEXT: v_cmp_class_f16_sdwa s4, v1, v3 src0_sel:WORD_1 src1_sel:DWORD
+; GFX10SELDAG-NEXT: v_mov_b32_e32 v1, v4
; GFX10SELDAG-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
-; GFX10SELDAG-NEXT: v_mov_b32_e32 v0, v4
-; GFX10SELDAG-NEXT: v_cndmask_b32_e64 v5, 0, 1, s5
-; GFX10SELDAG-NEXT: v_cmp_class_f16_e64 s5, v1, 3
-; GFX10SELDAG-NEXT: v_mov_b32_e32 v1, v5
-; GFX10SELDAG-NEXT: v_cndmask_b32_e64 v2, 0, 1, s5
; GFX10SELDAG-NEXT: s_setpc_b64 s[30:31]
;
; GFX10GLISEL-LABEL: isnan_v4f16:
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
index ab6a9dc..a87fa8b 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i16.ll
@@ -7404,35 +7404,35 @@ define amdgpu_kernel void @constant_sextload_v16i16_to_v16i64(ptr addrspace(1) %
; GFX12-NEXT: v_dual_mov_b32 v4, s22 :: v_dual_mov_b32 v9, s31
; GFX12-NEXT: v_dual_mov_b32 v8, s30 :: v_dual_mov_b32 v11, s35
; GFX12-NEXT: v_dual_mov_b32 v10, s34 :: v_dual_mov_b32 v3, s5
-; GFX12-NEXT: s_bfe_i64 s[10:11], s[0:1], 0x100000
-; GFX12-NEXT: s_lshr_b32 s12, s0, 16
-; GFX12-NEXT: s_mov_b32 s14, s1
-; GFX12-NEXT: s_lshr_b32 s16, s1, 16
-; GFX12-NEXT: s_bfe_i64 s[0:1], s[2:3], 0x100000
+; GFX12-NEXT: s_bfe_i64 s[16:17], s[2:3], 0x100000
; GFX12-NEXT: s_lshr_b32 s2, s2, 16
; GFX12-NEXT: s_bfe_i64 s[6:7], s[6:7], 0x100000
; GFX12-NEXT: v_dual_mov_b32 v0, s28 :: v_dual_mov_b32 v5, s23
; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v13, s25
+; GFX12-NEXT: s_mov_b32 s12, s1
+; GFX12-NEXT: s_lshr_b32 s14, s1, 16
; GFX12-NEXT: s_bfe_i64 s[18:19], s[18:19], 0x100000
; GFX12-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x100000
; GFX12-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
; GFX12-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v7, s7
+; GFX12-NEXT: s_bfe_i64 s[10:11], s[0:1], 0x100000
+; GFX12-NEXT: s_lshr_b32 s0, s0, 16
; GFX12-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x100000
; GFX12-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v17, s19
+; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x100000
; GFX12-NEXT: s_bfe_i64 s[14:15], s[14:15], 0x100000
-; GFX12-NEXT: s_bfe_i64 s[16:17], s[16:17], 0x100000
; GFX12-NEXT: v_dual_mov_b32 v16, s18 :: v_dual_mov_b32 v19, s21
; GFX12-NEXT: v_mov_b32_e32 v18, s20
-; GFX12-NEXT: s_bfe_i64 s[12:13], s[12:13], 0x100000
+; GFX12-NEXT: s_bfe_i64 s[0:1], s[0:1], 0x100000
; GFX12-NEXT: s_clause 0x1
; GFX12-NEXT: global_store_b128 v24, v[8:11], s[8:9] offset:80
; GFX12-NEXT: global_store_b128 v24, v[0:3], s[8:9] offset:64
-; GFX12-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v0, s0
+; GFX12-NEXT: v_dual_mov_b32 v1, s17 :: v_dual_mov_b32 v0, s16
; GFX12-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
-; GFX12-NEXT: v_dual_mov_b32 v9, s15 :: v_dual_mov_b32 v8, s14
-; GFX12-NEXT: v_dual_mov_b32 v11, s17 :: v_dual_mov_b32 v10, s16
+; GFX12-NEXT: v_dual_mov_b32 v9, s13 :: v_dual_mov_b32 v8, s12
+; GFX12-NEXT: v_dual_mov_b32 v11, s15 :: v_dual_mov_b32 v10, s14
; GFX12-NEXT: v_dual_mov_b32 v21, s11 :: v_dual_mov_b32 v20, s10
-; GFX12-NEXT: v_dual_mov_b32 v23, s13 :: v_dual_mov_b32 v22, s12
+; GFX12-NEXT: v_dual_mov_b32 v23, s1 :: v_dual_mov_b32 v22, s0
; GFX12-NEXT: s_clause 0x5
; GFX12-NEXT: global_store_b128 v24, v[12:15], s[8:9] offset:112
; GFX12-NEXT: global_store_b128 v24, v[4:7], s[8:9] offset:96
diff --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
index 952827b..889755c 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -8808,73 +8808,73 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
; GFX12-NEXT: v_lshrrev_b16 v2, 8, s6
; GFX12-NEXT: v_lshrrev_b16 v4, 8, s5
; GFX12-NEXT: v_lshrrev_b16 v8, 8, s2
-; GFX12-NEXT: s_lshr_b32 s24, s7, 16
+; GFX12-NEXT: s_lshr_b32 s22, s7, 16
; GFX12-NEXT: v_bfe_i32 v31, v1, 0, 8
-; GFX12-NEXT: s_lshr_b32 s42, s2, 24
-; GFX12-NEXT: s_mov_b32 s48, s7
+; GFX12-NEXT: s_lshr_b32 s40, s2, 24
+; GFX12-NEXT: s_mov_b32 s46, s7
; GFX12-NEXT: v_lshrrev_b16 v5, 8, s4
; GFX12-NEXT: v_lshrrev_b16 v7, 8, s1
-; GFX12-NEXT: s_lshr_b32 s26, s6, 16
-; GFX12-NEXT: s_lshr_b32 s44, s1, 16
+; GFX12-NEXT: s_lshr_b32 s24, s6, 16
+; GFX12-NEXT: s_lshr_b32 s42, s1, 16
; GFX12-NEXT: s_ashr_i64 s[58:59], s[6:7], 56
-; GFX12-NEXT: s_bfe_i64 s[48:49], s[48:49], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[46:47], s[46:47], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[22:23], s[22:23], 0x80000
; GFX12-NEXT: v_lshrrev_b16 v6, 8, s3
; GFX12-NEXT: v_lshrrev_b16 v3, 8, s0
-; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v33, s24
-; GFX12-NEXT: s_lshr_b32 s28, s6, 24
-; GFX12-NEXT: s_lshr_b32 s30, s5, 16
-; GFX12-NEXT: s_lshr_b32 s40, s2, 16
+; GFX12-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v33, s22
+; GFX12-NEXT: s_lshr_b32 s26, s6, 24
+; GFX12-NEXT: s_lshr_b32 s28, s5, 16
+; GFX12-NEXT: s_lshr_b32 s38, s2, 16
; GFX12-NEXT: v_bfe_i32 v11, v8, 0, 8
; GFX12-NEXT: v_bfe_i32 v23, v4, 0, 8
; GFX12-NEXT: v_bfe_i32 v27, v2, 0, 8
; GFX12-NEXT: v_ashrrev_i32_e32 v32, 31, v31
-; GFX12-NEXT: s_bfe_i64 s[44:45], s[44:45], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v34, s25 :: v_dual_mov_b32 v35, s58
-; GFX12-NEXT: v_dual_mov_b32 v36, s59 :: v_dual_mov_b32 v37, s26
-; GFX12-NEXT: v_dual_mov_b32 v56, s43 :: v_dual_mov_b32 v29, s48
-; GFX12-NEXT: v_mov_b32_e32 v30, s49
-; GFX12-NEXT: s_lshr_b32 s46, s0, 24
-; GFX12-NEXT: s_mov_b32 s50, s5
-; GFX12-NEXT: s_mov_b32 s52, s3
-; GFX12-NEXT: s_lshr_b32 s34, s4, 16
-; GFX12-NEXT: s_lshr_b32 s36, s4, 24
-; GFX12-NEXT: s_ashr_i64 s[22:23], s[2:3], 56
+; GFX12-NEXT: s_bfe_i64 s[42:43], s[42:43], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[24:25], s[24:25], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v34, s23 :: v_dual_mov_b32 v35, s58
+; GFX12-NEXT: v_dual_mov_b32 v36, s59 :: v_dual_mov_b32 v37, s24
+; GFX12-NEXT: v_dual_mov_b32 v56, s41 :: v_dual_mov_b32 v29, s46
+; GFX12-NEXT: v_mov_b32_e32 v30, s47
+; GFX12-NEXT: s_lshr_b32 s44, s0, 24
+; GFX12-NEXT: s_mov_b32 s48, s5
+; GFX12-NEXT: s_mov_b32 s50, s3
+; GFX12-NEXT: s_lshr_b32 s30, s4, 16
+; GFX12-NEXT: s_lshr_b32 s34, s4, 24
+; GFX12-NEXT: s_ashr_i64 s[54:55], s[2:3], 56
; GFX12-NEXT: s_ashr_i64 s[56:57], s[4:5], 56
; GFX12-NEXT: v_bfe_i32 v7, v7, 0, 8
; GFX12-NEXT: v_bfe_i32 v19, v5, 0, 8
-; GFX12-NEXT: s_bfe_i64 s[40:41], s[40:41], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
; GFX12-NEXT: s_bfe_i64 s[28:29], s[28:29], 0x80000
-; GFX12-NEXT: s_lshr_b32 s38, s3, 16
-; GFX12-NEXT: s_mov_b32 s54, s1
+; GFX12-NEXT: s_bfe_i64 s[26:27], s[26:27], 0x80000
+; GFX12-NEXT: s_lshr_b32 s36, s3, 16
+; GFX12-NEXT: s_mov_b32 s52, s1
; GFX12-NEXT: s_bfe_i64 s[12:13], s[2:3], 0x80000
; GFX12-NEXT: s_bfe_i64 s[14:15], s[4:5], 0x80000
; GFX12-NEXT: s_bfe_i64 s[16:17], s[6:7], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[2:3], s[52:53], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[4:5], s[50:51], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[6:7], s[46:47], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[2:3], s[50:51], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[4:5], s[48:49], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[6:7], s[44:45], 0x80000
; GFX12-NEXT: s_lshr_b32 s20, s0, 16
; GFX12-NEXT: s_ashr_i64 s[18:19], s[0:1], 56
; GFX12-NEXT: v_bfe_i32 v3, v3, 0, 8
; GFX12-NEXT: v_bfe_i32 v15, v6, 0, 8
-; GFX12-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
; GFX12-NEXT: s_bfe_i64 s[34:35], s[34:35], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v38, s27 :: v_dual_mov_b32 v39, s28
-; GFX12-NEXT: v_dual_mov_b32 v40, s29 :: v_dual_mov_b32 v41, s30
-; GFX12-NEXT: v_dual_mov_b32 v42, s31 :: v_dual_mov_b32 v43, s56
-; GFX12-NEXT: v_dual_mov_b32 v44, s57 :: v_dual_mov_b32 v45, s34
-; GFX12-NEXT: v_dual_mov_b32 v52, s23 :: v_dual_mov_b32 v53, s40
-; GFX12-NEXT: v_dual_mov_b32 v54, s41 :: v_dual_mov_b32 v55, s42
+; GFX12-NEXT: s_bfe_i64 s[30:31], s[30:31], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v38, s25 :: v_dual_mov_b32 v39, s26
+; GFX12-NEXT: v_dual_mov_b32 v40, s27 :: v_dual_mov_b32 v41, s28
+; GFX12-NEXT: v_dual_mov_b32 v42, s29 :: v_dual_mov_b32 v43, s56
+; GFX12-NEXT: v_dual_mov_b32 v44, s57 :: v_dual_mov_b32 v45, s30
+; GFX12-NEXT: v_dual_mov_b32 v52, s55 :: v_dual_mov_b32 v53, s38
+; GFX12-NEXT: v_dual_mov_b32 v54, s39 :: v_dual_mov_b32 v55, s40
; GFX12-NEXT: s_bfe_i64 s[10:11], s[0:1], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[0:1], s[54:55], 0x80000
+; GFX12-NEXT: s_bfe_i64 s[0:1], s[52:53], 0x80000
; GFX12-NEXT: v_ashrrev_i32_e32 v12, 31, v11
; GFX12-NEXT: v_ashrrev_i32_e32 v24, 31, v23
; GFX12-NEXT: v_ashrrev_i32_e32 v28, 31, v27
; GFX12-NEXT: global_store_b128 v0, v[33:36], s[8:9] offset:240
-; GFX12-NEXT: v_mov_b32_e32 v33, s44
+; GFX12-NEXT: v_mov_b32_e32 v33, s42
; GFX12-NEXT: global_store_b128 v0, v[29:32], s[8:9] offset:224
; GFX12-NEXT: v_dual_mov_b32 v25, s16 :: v_dual_mov_b32 v26, s17
; GFX12-NEXT: v_dual_mov_b32 v32, s7 :: v_dual_mov_b32 v21, s4
@@ -8882,16 +8882,16 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
; GFX12-NEXT: v_dual_mov_b32 v14, s3 :: v_dual_mov_b32 v9, s12
; GFX12-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v5, s0
; GFX12-NEXT: s_bfe_i64 s[20:21], s[20:21], 0x80000
-; GFX12-NEXT: s_bfe_i64 s[38:39], s[38:39], 0x80000
-; GFX12-NEXT: v_dual_mov_b32 v46, s35 :: v_dual_mov_b32 v47, s36
-; GFX12-NEXT: v_dual_mov_b32 v48, s37 :: v_dual_mov_b32 v49, s38
-; GFX12-NEXT: v_dual_mov_b32 v34, s45 :: v_dual_mov_b32 v35, s18
+; GFX12-NEXT: s_bfe_i64 s[36:37], s[36:37], 0x80000
+; GFX12-NEXT: v_dual_mov_b32 v46, s31 :: v_dual_mov_b32 v47, s34
+; GFX12-NEXT: v_dual_mov_b32 v48, s35 :: v_dual_mov_b32 v49, s36
+; GFX12-NEXT: v_dual_mov_b32 v34, s43 :: v_dual_mov_b32 v35, s18
; GFX12-NEXT: v_dual_mov_b32 v36, s19 :: v_dual_mov_b32 v29, s20
; GFX12-NEXT: v_ashrrev_i32_e32 v8, 31, v7
; GFX12-NEXT: v_ashrrev_i32_e32 v20, 31, v19
; GFX12-NEXT: v_dual_mov_b32 v18, s15 :: v_dual_mov_b32 v13, s2
; GFX12-NEXT: v_dual_mov_b32 v6, s1 :: v_dual_mov_b32 v1, s10
-; GFX12-NEXT: v_dual_mov_b32 v50, s39 :: v_dual_mov_b32 v51, s22
+; GFX12-NEXT: v_dual_mov_b32 v50, s37 :: v_dual_mov_b32 v51, s54
; GFX12-NEXT: v_dual_mov_b32 v30, s21 :: v_dual_mov_b32 v31, s6
; GFX12-NEXT: v_ashrrev_i32_e32 v4, 31, v3
; GFX12-NEXT: v_ashrrev_i32_e32 v16, 31, v15
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spill-overlap-wwm-reserve.mir b/llvm/test/CodeGen/AMDGPU/sgpr-spill-overlap-wwm-reserve.mir
index f8e7cb3..8a5f753 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spill-overlap-wwm-reserve.mir
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spill-overlap-wwm-reserve.mir
@@ -28,18 +28,17 @@ body: |
; GCN-LABEL: name: test_main
; GCN: bb.0:
; GCN-NEXT: successors: %bb.1(0x80000000)
- ; GCN-NEXT: liveins: $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr18, $sgpr19, $sgpr20, $sgpr21, $sgpr22, $sgpr23, $sgpr24, $sgpr25, $sgpr26, $sgpr27, $sgpr28, $sgpr29, $sgpr30, $sgpr31, $sgpr64, $sgpr65, $sgpr66, $sgpr67, $sgpr68, $sgpr69, $sgpr70, $sgpr71, $sgpr72, $sgpr73, $sgpr74, $sgpr75, $sgpr76, $sgpr77, $sgpr78, $sgpr79, $sgpr80, $sgpr81, $sgpr82, $sgpr83, $sgpr84, $sgpr85, $sgpr86, $sgpr87, $sgpr88, $sgpr89, $sgpr90, $sgpr91, $sgpr92, $sgpr93, $sgpr94, $sgpr95, $sgpr96, $sgpr97, $sgpr98, $sgpr99, $sgpr100, $sgpr101, $sgpr102, $sgpr103, $vgpr0, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GCN-NEXT: liveins: $vcc_hi, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $sgpr8, $sgpr9, $sgpr10, $sgpr11, $sgpr12, $sgpr13, $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr18, $sgpr19, $sgpr20, $sgpr21, $sgpr22, $sgpr23, $sgpr24, $sgpr25, $sgpr26, $sgpr27, $sgpr28, $sgpr29, $sgpr30, $sgpr31, $sgpr64, $sgpr65, $sgpr66, $sgpr67, $sgpr68, $sgpr69, $sgpr70, $sgpr71, $sgpr72, $sgpr73, $sgpr74, $sgpr75, $sgpr76, $sgpr77, $sgpr78, $sgpr79, $sgpr80, $sgpr81, $sgpr82, $sgpr83, $sgpr84, $sgpr85, $sgpr86, $sgpr87, $sgpr88, $sgpr89, $sgpr90, $sgpr91, $sgpr92, $sgpr93, $sgpr94, $sgpr95, $sgpr96, $sgpr97, $sgpr98, $sgpr99, $sgpr100, $sgpr101, $sgpr102, $sgpr103, $vgpr0, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GCN-NEXT: {{ $}}
- ; GCN-NEXT: $sgpr0 = COPY $sgpr33
+ ; GCN-NEXT: $vcc_hi = frame-setup COPY $sgpr33
; GCN-NEXT: $sgpr33 = frame-setup COPY $sgpr32
- ; GCN-NEXT: $sgpr1 = S_XOR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
+ ; GCN-NEXT: $sgpr0 = S_XOR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr3, $sgpr33, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.69, addrspace 5)
; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr4, $sgpr33, 4, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.70, addrspace 5)
; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr5, $sgpr33, 8, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.71, addrspace 5)
; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR $vgpr2, $sgpr33, 12, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.72, addrspace 5)
- ; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR killed $vgpr1, $sgpr33, 16, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.74, addrspace 5)
- ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr1
- ; GCN-NEXT: $vgpr5 = SI_SPILL_S32_TO_VGPR $sgpr0, 4, undef $vgpr5
+ ; GCN-NEXT: SCRATCH_STORE_DWORD_SADDR killed $vgpr1, $sgpr33, 16, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %stack.73, addrspace 5)
+ ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr0
; GCN-NEXT: $sgpr32 = frame-setup S_ADD_I32 $sgpr32, 24, implicit-def dead $scc
; GCN-NEXT: renamable $vgpr2 = IMPLICIT_DEF
; GCN-NEXT: $vgpr3 = SI_SPILL_S32_TO_VGPR $sgpr4, 0, $vgpr3
@@ -116,18 +115,18 @@ body: |
; GCN-NEXT: {{ $}}
; GCN-NEXT: bb.1:
; GCN-NEXT: successors: %bb.2(0x80000000)
- ; GCN-NEXT: liveins: $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GCN-NEXT: liveins: $vcc_hi, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GCN-NEXT: {{ $}}
; GCN-NEXT: KILL implicit-def $vcc_lo, implicit-def $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15_sgpr16_sgpr17_sgpr18_sgpr19_sgpr20_sgpr21_sgpr22_sgpr23_sgpr24_sgpr25_sgpr26_sgpr27_sgpr28_sgpr29_sgpr30_sgpr31, implicit-def $sgpr32_sgpr33_sgpr34_sgpr35_sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63, implicit-def $sgpr64_sgpr65_sgpr66_sgpr67_sgpr68_sgpr69_sgpr70_sgpr71_sgpr72_sgpr73_sgpr74_sgpr75_sgpr76_sgpr77_sgpr78_sgpr79_sgpr80_sgpr81_sgpr82_sgpr83_sgpr84_sgpr85_sgpr86_sgpr87_sgpr88_sgpr89_sgpr90_sgpr91_sgpr92_sgpr93_sgpr94_sgpr95, implicit-def $sgpr96_sgpr97_sgpr98_sgpr99_sgpr100_sgpr101_sgpr102_sgpr103
; GCN-NEXT: {{ $}}
; GCN-NEXT: bb.2:
; GCN-NEXT: successors: %bb.3(0x80000000)
- ; GCN-NEXT: liveins: $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GCN-NEXT: liveins: $vcc_hi, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GCN-NEXT: {{ $}}
; GCN-NEXT: $sgpr22 = SI_RESTORE_S32_FROM_VGPR $vgpr2, 0
; GCN-NEXT: {{ $}}
; GCN-NEXT: bb.3:
- ; GCN-NEXT: liveins: $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GCN-NEXT: liveins: $vcc_hi, $vgpr2, $vgpr3, $vgpr4, $vgpr5
; GCN-NEXT: {{ $}}
; GCN-NEXT: $sgpr103 = SI_RESTORE_S32_FROM_VGPR $vgpr5, 3
; GCN-NEXT: $sgpr102 = SI_RESTORE_S32_FROM_VGPR $vgpr5, 2
@@ -198,16 +197,15 @@ body: |
; GCN-NEXT: $sgpr5 = SI_RESTORE_S32_FROM_VGPR $vgpr3, 1
; GCN-NEXT: $sgpr4 = SI_RESTORE_S32_FROM_VGPR $vgpr3, 0
; GCN-NEXT: KILL killed renamable $vgpr2
- ; GCN-NEXT: $sgpr0 = SI_RESTORE_S32_FROM_VGPR $vgpr5, 4
- ; GCN-NEXT: $sgpr1 = S_XOR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
+ ; GCN-NEXT: $sgpr0 = S_XOR_SAVEEXEC_B32 -1, implicit-def $exec, implicit-def dead $scc, implicit $exec
; GCN-NEXT: $vgpr3 = SCRATCH_LOAD_DWORD_SADDR $sgpr33, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.69, addrspace 5)
; GCN-NEXT: $vgpr4 = SCRATCH_LOAD_DWORD_SADDR $sgpr33, 4, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.70, addrspace 5)
; GCN-NEXT: $vgpr5 = SCRATCH_LOAD_DWORD_SADDR $sgpr33, 8, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.71, addrspace 5)
; GCN-NEXT: $vgpr2 = SCRATCH_LOAD_DWORD_SADDR $sgpr33, 12, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.72, addrspace 5)
- ; GCN-NEXT: $vgpr1 = SCRATCH_LOAD_DWORD_SADDR $sgpr33, 16, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.74, addrspace 5)
- ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr1
+ ; GCN-NEXT: $vgpr1 = SCRATCH_LOAD_DWORD_SADDR $sgpr33, 16, 0, implicit $exec, implicit $flat_scr :: (load (s32) from %stack.73, addrspace 5)
+ ; GCN-NEXT: $exec_lo = S_MOV_B32 killed $sgpr0
; GCN-NEXT: $sgpr32 = frame-destroy S_ADD_I32 $sgpr32, -24, implicit-def dead $scc
- ; GCN-NEXT: $sgpr33 = COPY $sgpr0
+ ; GCN-NEXT: $sgpr33 = frame-destroy COPY $vcc_hi
; GCN-NEXT: S_ENDPGM 0
bb.0:
liveins: $vgpr0
diff --git a/llvm/test/CodeGen/AMDGPU/vopd-combine.mir b/llvm/test/CodeGen/AMDGPU/vopd-combine.mir
index 63bef40..b8ac50c 100644
--- a/llvm/test/CodeGen/AMDGPU/vopd-combine.mir
+++ b/llvm/test/CodeGen/AMDGPU/vopd-combine.mir
@@ -160,7 +160,7 @@ body: |
; PAIR-GFX11-NEXT: $vgpr3 = IMPLICIT_DEF
; PAIR-GFX11-NEXT: $sgpr20 = IMPLICIT_DEF
; PAIR-GFX11-NEXT: $vgpr4 = V_FMAMK_F32 $sgpr20, 12345, $vgpr3, implicit $mode, implicit $exec
- ; PAIR-GFX11-NEXT: $vgpr2, $vgpr5 = V_DUAL_FMAC_F32_e32_X_CNDMASK_B32_e32_gfx11 $sgpr20, killed $vgpr1, killed $vgpr2, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+ ; PAIR-GFX11-NEXT: $vgpr2, $vgpr5 = V_DUAL_FMAC_F32_e32_X_CNDMASK_B32_e32_gfx11 $sgpr20, killed $vgpr1, killed $vgpr2, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
; PAIR-GFX11-NEXT: $vgpr7 = V_CNDMASK_B32_e32 killed $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
; PAIR-GFX11-NEXT: $vgpr6 = V_ADD_F32_e32 $sgpr20, $vgpr3, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr9 = V_CNDMASK_B32_e32 killed $sgpr20, killed $vgpr3, implicit $mode, implicit $exec, implicit killed $vcc_lo
@@ -174,7 +174,7 @@ body: |
; PAIR-GFX12-NEXT: $vgpr3 = IMPLICIT_DEF
; PAIR-GFX12-NEXT: $sgpr20 = IMPLICIT_DEF
; PAIR-GFX12-NEXT: $vgpr4 = V_FMAMK_F32 $sgpr20, 12345, $vgpr3, implicit $mode, implicit $exec
- ; PAIR-GFX12-NEXT: $vgpr2, $vgpr5 = V_DUAL_FMAC_F32_e32_X_CNDMASK_B32_e32_gfx12 $sgpr20, killed $vgpr1, killed $vgpr2, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+ ; PAIR-GFX12-NEXT: $vgpr2, $vgpr5 = V_DUAL_FMAC_F32_e32_X_CNDMASK_B32_e32_gfx12 $sgpr20, killed $vgpr1, killed $vgpr2, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
; PAIR-GFX12-NEXT: $vgpr7 = V_CNDMASK_B32_e32 killed $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
; PAIR-GFX12-NEXT: $vgpr6 = V_ADD_F32_e32 $sgpr20, $vgpr3, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr9 = V_CNDMASK_B32_e32 killed $sgpr20, killed $vgpr3, implicit $mode, implicit $exec, implicit killed $vcc_lo
@@ -458,9 +458,9 @@ body: |
; PAIR-GFX11-NEXT: $vgpr3, $vgpr6 = V_DUAL_SUB_F32_e32_X_MUL_F32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr0, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr2 = V_FMAC_F32_e32 10, $vgpr1, killed $vgpr2, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
- ; PAIR-GFX11-NEXT: $vgpr12, $vgpr19 = V_DUAL_ADD_F32_e32_X_CNDMASK_B32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+ ; PAIR-GFX11-NEXT: $vgpr12, $vgpr19 = V_DUAL_ADD_F32_e32_X_CNDMASK_B32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
; PAIR-GFX11-NEXT: $vgpr11 = V_CNDMASK_B32_e32 $vgpr0, killed $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
- ; PAIR-GFX11-NEXT: $vgpr17, $vgpr10 = V_DUAL_MUL_F32_e32_X_CNDMASK_B32_e32_gfx11 killed $vgpr0, $vgpr0, $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+ ; PAIR-GFX11-NEXT: $vgpr17, $vgpr10 = V_DUAL_MUL_F32_e32_X_CNDMASK_B32_e32_gfx11 killed $vgpr0, $vgpr0, $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
; PAIR-GFX11-NEXT: $vgpr15 = V_CNDMASK_B32_e32 $vgpr1, killed $vgpr2, implicit $mode, implicit $exec, implicit killed $vcc_lo
; PAIR-GFX11-NEXT: $vgpr16 = V_SUB_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr14 = V_SUB_F32_e32 killed $vgpr1, $vgpr1, implicit $mode, implicit $exec
@@ -476,9 +476,9 @@ body: |
; PAIR-GFX12-NEXT: $vgpr3, $vgpr6 = V_DUAL_SUB_F32_e32_X_MUL_F32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr0, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr2 = V_FMAC_F32_e32 10, $vgpr1, killed $vgpr2, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
- ; PAIR-GFX12-NEXT: $vgpr12, $vgpr19 = V_DUAL_ADD_F32_e32_X_CNDMASK_B32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+ ; PAIR-GFX12-NEXT: $vgpr12, $vgpr19 = V_DUAL_ADD_F32_e32_X_CNDMASK_B32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
; PAIR-GFX12-NEXT: $vgpr11 = V_CNDMASK_B32_e32 $vgpr0, killed $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo
- ; PAIR-GFX12-NEXT: $vgpr17, $vgpr10 = V_DUAL_MUL_F32_e32_X_CNDMASK_B32_e32_gfx12 killed $vgpr0, $vgpr0, $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+ ; PAIR-GFX12-NEXT: $vgpr17, $vgpr10 = V_DUAL_MUL_F32_e32_X_CNDMASK_B32_e32_gfx12 killed $vgpr0, $vgpr0, $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
; PAIR-GFX12-NEXT: $vgpr15 = V_CNDMASK_B32_e32 $vgpr1, killed $vgpr2, implicit $mode, implicit $exec, implicit killed $vcc_lo
; PAIR-GFX12-NEXT: $vgpr16 = V_SUB_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr14 = V_SUB_F32_e32 killed $vgpr1, $vgpr1, implicit $mode, implicit $exec
@@ -559,12 +559,12 @@ body: |
; PAIR-GFX11-NEXT: $vgpr3, $vgpr6 = V_DUAL_SUB_F32_e32_X_MUL_F32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr0, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr2 = V_FMAC_F32_e32 10, $vgpr1, killed $vgpr2, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
- ; PAIR-GFX11-NEXT: $vgpr4, $vgpr29 = V_DUAL_SUB_F32_e32_X_CNDMASK_B32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
- ; PAIR-GFX11-NEXT: $vgpr19, $vgpr20 = V_DUAL_CNDMASK_B32_e32_X_FMAC_F32_e32_gfx11 $vgpr0, $vgpr3, 10, $vgpr1, killed $vgpr20, implicit $vcc, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
+ ; PAIR-GFX11-NEXT: $vgpr4, $vgpr29 = V_DUAL_SUB_F32_e32_X_CNDMASK_B32_e32_gfx11 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+ ; PAIR-GFX11-NEXT: $vgpr19, $vgpr20 = V_DUAL_CNDMASK_B32_e32_X_FMAC_F32_e32_gfx11 $vgpr0, $vgpr3, 10, $vgpr1, killed $vgpr20, implicit $vcc_lo, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr15 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo
- ; PAIR-GFX11-NEXT: $vgpr10, $vgpr17 = V_DUAL_CNDMASK_B32_e32_X_MUL_F32_e32_gfx11 $vgpr1, $vgpr2, $vgpr0, $vgpr0, implicit $vcc, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
- ; PAIR-GFX11-NEXT: $vgpr11, $vgpr12 = V_DUAL_CNDMASK_B32_e32_X_ADD_F32_e32_gfx11 $vgpr0, $vgpr3, $vgpr1, $vgpr1, implicit $vcc, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
- ; PAIR-GFX11-NEXT: $vgpr37, $vgpr14 = V_DUAL_CNDMASK_B32_e32_X_SUB_F32_e32_gfx11 $vgpr0, killed $vgpr3, $vgpr1, $vgpr1, implicit $vcc, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
+ ; PAIR-GFX11-NEXT: $vgpr10, $vgpr17 = V_DUAL_CNDMASK_B32_e32_X_MUL_F32_e32_gfx11 $vgpr1, $vgpr2, $vgpr0, $vgpr0, implicit $vcc_lo, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
+ ; PAIR-GFX11-NEXT: $vgpr11, $vgpr12 = V_DUAL_CNDMASK_B32_e32_X_ADD_F32_e32_gfx11 $vgpr0, $vgpr3, $vgpr1, $vgpr1, implicit $vcc_lo, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
+ ; PAIR-GFX11-NEXT: $vgpr37, $vgpr14 = V_DUAL_CNDMASK_B32_e32_X_SUB_F32_e32_gfx11 $vgpr0, killed $vgpr3, $vgpr1, $vgpr1, implicit $vcc_lo, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr20 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr21, $vgpr24 = V_DUAL_SUB_F32_e32_X_MUL_F32_e32_gfx11 $vgpr1, $vgpr1, killed $vgpr0, $vgpr0, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $mode, implicit $exec
; PAIR-GFX11-NEXT: $vgpr28 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo
@@ -586,12 +586,12 @@ body: |
; PAIR-GFX12-NEXT: $vgpr3, $vgpr6 = V_DUAL_SUB_F32_e32_X_MUL_F32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr0, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr2 = V_FMAC_F32_e32 10, $vgpr1, killed $vgpr2, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr2 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
- ; PAIR-GFX12-NEXT: $vgpr4, $vgpr29 = V_DUAL_SUB_F32_e32_X_CNDMASK_B32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
- ; PAIR-GFX12-NEXT: $vgpr19, $vgpr20 = V_DUAL_CNDMASK_B32_e32_X_FMAC_F32_e32_gfx12 $vgpr0, $vgpr3, 10, $vgpr1, killed $vgpr20, implicit $vcc, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
+ ; PAIR-GFX12-NEXT: $vgpr4, $vgpr29 = V_DUAL_SUB_F32_e32_X_CNDMASK_B32_e32_gfx12 $vgpr1, $vgpr1, $vgpr0, $vgpr3, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $vcc_lo
+ ; PAIR-GFX12-NEXT: $vgpr19, $vgpr20 = V_DUAL_CNDMASK_B32_e32_X_FMAC_F32_e32_gfx12 $vgpr0, $vgpr3, 10, $vgpr1, killed $vgpr20, implicit $vcc_lo, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr15 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo
- ; PAIR-GFX12-NEXT: $vgpr10, $vgpr17 = V_DUAL_CNDMASK_B32_e32_X_MUL_F32_e32_gfx12 $vgpr1, $vgpr2, $vgpr0, $vgpr0, implicit $vcc, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
- ; PAIR-GFX12-NEXT: $vgpr11, $vgpr12 = V_DUAL_CNDMASK_B32_e32_X_ADD_F32_e32_gfx12 $vgpr0, $vgpr3, $vgpr1, $vgpr1, implicit $vcc, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
- ; PAIR-GFX12-NEXT: $vgpr37, $vgpr14 = V_DUAL_CNDMASK_B32_e32_X_SUB_F32_e32_gfx12 $vgpr0, killed $vgpr3, $vgpr1, $vgpr1, implicit $vcc, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
+ ; PAIR-GFX12-NEXT: $vgpr10, $vgpr17 = V_DUAL_CNDMASK_B32_e32_X_MUL_F32_e32_gfx12 $vgpr1, $vgpr2, $vgpr0, $vgpr0, implicit $vcc_lo, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
+ ; PAIR-GFX12-NEXT: $vgpr11, $vgpr12 = V_DUAL_CNDMASK_B32_e32_X_ADD_F32_e32_gfx12 $vgpr0, $vgpr3, $vgpr1, $vgpr1, implicit $vcc_lo, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
+ ; PAIR-GFX12-NEXT: $vgpr37, $vgpr14 = V_DUAL_CNDMASK_B32_e32_X_SUB_F32_e32_gfx12 $vgpr0, killed $vgpr3, $vgpr1, $vgpr1, implicit $vcc_lo, implicit $exec, implicit $mode, implicit $mode, implicit $exec, implicit $vcc_lo, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr20 = V_ADD_F32_e32 $vgpr1, $vgpr1, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr21, $vgpr24 = V_DUAL_SUB_F32_e32_X_MUL_F32_e32_gfx12 $vgpr1, $vgpr1, killed $vgpr0, $vgpr0, implicit $mode, implicit $exec, implicit $mode, implicit $exec, implicit $mode, implicit $exec
; PAIR-GFX12-NEXT: $vgpr28 = V_CNDMASK_B32_e32 $vgpr1, $vgpr2, implicit $mode, implicit $exec, implicit $vcc_lo
diff --git a/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll b/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
index dddc4bd..c33c818 100644
--- a/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
+++ b/llvm/test/CodeGen/Hexagon/vect/zext-v4i1.ll
@@ -10,12 +10,13 @@ define i32 @fred(ptr %a0) #0 {
; CHECK-LABEL: fred:
; CHECK: // %bb.0: // %b0
; CHECK-NEXT: {
-; CHECK-NEXT: if (p0) jump:nt .LBB0_2
+; CHECK-NEXT: r1:0 = combine(r0,#0)
+; CHECK-NEXT: if (p0) jumpr r31
; CHECK-NEXT: }
-; CHECK-NEXT: // %bb.1: // %b2
+; CHECK-NEXT: .LBB0_1: // %b2
; CHECK-NEXT: {
; CHECK-NEXT: r3:2 = combine(#0,#0)
-; CHECK-NEXT: r1:0 = memd(r0+#0)
+; CHECK-NEXT: r1:0 = memd(r1+#0)
; CHECK-NEXT: }
; CHECK-NEXT: {
; CHECK-NEXT: p0 = vcmph.eq(r1:0,r3:2)
@@ -27,16 +28,7 @@ define i32 @fred(ptr %a0) #0 {
; CHECK-NEXT: r0 = and(r0,#1)
; CHECK-NEXT: }
; CHECK-NEXT: {
-; CHECK-NEXT: p0 = cmp.eq(r0,#11)
-; CHECK-NEXT: r0 = #1
-; CHECK-NEXT: }
-; CHECK-NEXT: {
-; CHECK-NEXT: if (p0) r0 = #0
-; CHECK-NEXT: jumpr r31
-; CHECK-NEXT: }
-; CHECK-NEXT: .LBB0_2: // %b14
-; CHECK-NEXT: {
-; CHECK-NEXT: r0 = #0
+; CHECK-NEXT: r0 = !cmp.eq(r0,#11)
; CHECK-NEXT: jumpr r31
; CHECK-NEXT: }
b0:
diff --git a/llvm/test/CodeGen/RISCV/prefer-w-inst.ll b/llvm/test/CodeGen/RISCV/prefer-w-inst.ll
new file mode 100644
index 0000000..34ab74d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/prefer-w-inst.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=NO-PREFER-W-INST %s
+; RUN: llc -mtriple=riscv64 -mattr=+m -riscv-disable-strip-w-suffix -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=NO-STRIP %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+prefer-w-inst -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=PREFER-W-INST %s
+
+define i32 @addiw(i32 %a) {
+; NO-PREFER-W-INST-LABEL: addiw:
+; NO-PREFER-W-INST: # %bb.0:
+; NO-PREFER-W-INST-NEXT: lui a1, 1
+; NO-PREFER-W-INST-NEXT: addi a1, a1, -1
+; NO-PREFER-W-INST-NEXT: addw a0, a0, a1
+; NO-PREFER-W-INST-NEXT: ret
+;
+; NO-STRIP-LABEL: addiw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: lui a1, 1
+; NO-STRIP-NEXT: addiw a1, a1, -1
+; NO-STRIP-NEXT: addw a0, a0, a1
+; NO-STRIP-NEXT: ret
+;
+; PREFER-W-INST-LABEL: addiw:
+; PREFER-W-INST: # %bb.0:
+; PREFER-W-INST-NEXT: lui a1, 1
+; PREFER-W-INST-NEXT: addiw a1, a1, -1
+; PREFER-W-INST-NEXT: addw a0, a0, a1
+; PREFER-W-INST-NEXT: ret
+ %ret = add i32 %a, 4095
+ ret i32 %ret
+}
+
+define i32 @addw(i32 %a, i32 %b) {
+; NO-PREFER-W-INST-LABEL: addw:
+; NO-PREFER-W-INST: # %bb.0:
+; NO-PREFER-W-INST-NEXT: add a0, a0, a1
+; NO-PREFER-W-INST-NEXT: addiw a0, a0, 1024
+; NO-PREFER-W-INST-NEXT: ret
+;
+; NO-STRIP-LABEL: addw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: addw a0, a0, a1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+;
+; PREFER-W-INST-LABEL: addw:
+; PREFER-W-INST: # %bb.0:
+; PREFER-W-INST-NEXT: addw a0, a0, a1
+; PREFER-W-INST-NEXT: addiw a0, a0, 1024
+; PREFER-W-INST-NEXT: ret
+ %add = add i32 %a, %b
+ %ret = add i32 %add, 1024
+ ret i32 %ret
+}
+
+define i32 @mulw(i32 %a, i32 %b) {
+; NO-PREFER-W-INST-LABEL: mulw:
+; NO-PREFER-W-INST: # %bb.0:
+; NO-PREFER-W-INST-NEXT: mul a1, a0, a1
+; NO-PREFER-W-INST-NEXT: mul a0, a0, a1
+; NO-PREFER-W-INST-NEXT: addiw a0, a0, 1024
+; NO-PREFER-W-INST-NEXT: ret
+;
+; NO-STRIP-LABEL: mulw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: mulw a1, a0, a1
+; NO-STRIP-NEXT: mulw a0, a0, a1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+;
+; PREFER-W-INST-LABEL: mulw:
+; PREFER-W-INST: # %bb.0:
+; PREFER-W-INST-NEXT: mulw a1, a0, a1
+; PREFER-W-INST-NEXT: mulw a0, a0, a1
+; PREFER-W-INST-NEXT: addiw a0, a0, 1024
+; PREFER-W-INST-NEXT: ret
+ %mul1 = mul i32 %a, %b
+ %mul = mul i32 %a, %mul1
+ %ret = add i32 %mul, 1024
+ ret i32 %ret
+}
+
+define i32 @slliw(i32 %a) {
+; NO-PREFER-W-INST-LABEL: slliw:
+; NO-PREFER-W-INST: # %bb.0:
+; NO-PREFER-W-INST-NEXT: slli a0, a0, 1
+; NO-PREFER-W-INST-NEXT: addiw a0, a0, 1024
+; NO-PREFER-W-INST-NEXT: ret
+;
+; NO-STRIP-LABEL: slliw:
+; NO-STRIP: # %bb.0:
+; NO-STRIP-NEXT: slliw a0, a0, 1
+; NO-STRIP-NEXT: addiw a0, a0, 1024
+; NO-STRIP-NEXT: ret
+;
+; PREFER-W-INST-LABEL: slliw:
+; PREFER-W-INST: # %bb.0:
+; PREFER-W-INST-NEXT: slliw a0, a0, 1
+; PREFER-W-INST-NEXT: addiw a0, a0, 1024
+; PREFER-W-INST-NEXT: ret
+ %shl = shl i32 %a, 1
+ %ret = add i32 %shl, 1024
+ ret i32 %ret
+}
diff --git a/llvm/test/CodeGen/RISCV/prefer-w-inst.mir b/llvm/test/CodeGen/RISCV/prefer-w-inst.mir
new file mode 100644
index 0000000..e05e27a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/prefer-w-inst.mir
@@ -0,0 +1,262 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc %s -mtriple=riscv64 -run-pass=riscv-opt-w-instrs -verify-machineinstrs \
+# RUN: -mattr=+m -o - | FileCheck %s -check-prefixes=NO-PREFER-W-INST
+# RUN: llc %s -mtriple=riscv64 -run-pass=riscv-opt-w-instrs -verify-machineinstrs \
+# RUN: -mattr=+m,+prefer-w-inst -o - | FileCheck %s -check-prefixes=PREFER-W-INST
+
+---
+name: addi
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; NO-PREFER-W-INST-LABEL: name: addi
+ ; NO-PREFER-W-INST: liveins: $x10, $x11
+ ; NO-PREFER-W-INST-NEXT: {{ $}}
+ ; NO-PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; NO-PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; NO-PREFER-W-INST-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY]], 1
+ ; NO-PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[ADDI]], 1
+ ; NO-PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; NO-PREFER-W-INST-NEXT: PseudoRET
+ ;
+ ; PREFER-W-INST-LABEL: name: addi
+ ; PREFER-W-INST: liveins: $x10, $x11
+ ; PREFER-W-INST-NEXT: {{ $}}
+ ; PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[COPY]], 1
+ ; PREFER-W-INST-NEXT: [[ADDIW1:%[0-9]+]]:gpr = ADDIW [[ADDIW]], 1
+ ; PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW1]]
+ ; PREFER-W-INST-NEXT: PseudoRET
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 1
+ %4:gpr = ADDIW %3, 1
+ $x10 = COPY %4
+ PseudoRET
+...
+
+---
+name: add
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; NO-PREFER-W-INST-LABEL: name: add
+ ; NO-PREFER-W-INST: liveins: $x10, $x11
+ ; NO-PREFER-W-INST-NEXT: {{ $}}
+ ; NO-PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; NO-PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; NO-PREFER-W-INST-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[COPY1]]
+ ; NO-PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[ADD]], 1
+ ; NO-PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; NO-PREFER-W-INST-NEXT: PseudoRET
+ ;
+ ; PREFER-W-INST-LABEL: name: add
+ ; PREFER-W-INST: liveins: $x10, $x11
+ ; PREFER-W-INST-NEXT: {{ $}}
+ ; PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; PREFER-W-INST-NEXT: [[ADDW:%[0-9]+]]:gpr = ADDW [[COPY]], [[COPY1]]
+ ; PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[ADDW]], 1
+ ; PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; PREFER-W-INST-NEXT: PseudoRET
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = ADDIW %3, 1
+ $x10 = COPY %4
+ PseudoRET
+...
+
+---
+name: sub
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; NO-PREFER-W-INST-LABEL: name: sub
+ ; NO-PREFER-W-INST: liveins: $x10, $x11
+ ; NO-PREFER-W-INST-NEXT: {{ $}}
+ ; NO-PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; NO-PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; NO-PREFER-W-INST-NEXT: [[SUB:%[0-9]+]]:gpr = SUB [[COPY]], [[COPY1]]
+ ; NO-PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[SUB]], 1
+ ; NO-PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; NO-PREFER-W-INST-NEXT: PseudoRET
+ ;
+ ; PREFER-W-INST-LABEL: name: sub
+ ; PREFER-W-INST: liveins: $x10, $x11
+ ; PREFER-W-INST-NEXT: {{ $}}
+ ; PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; PREFER-W-INST-NEXT: [[SUBW:%[0-9]+]]:gpr = SUBW [[COPY]], [[COPY1]]
+ ; PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[SUBW]], 1
+ ; PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; PREFER-W-INST-NEXT: PseudoRET
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SUB %1, %2
+ %4:gpr = ADDIW %3, 1
+ $x10 = COPY %4
+ PseudoRET
+...
+
+---
+name: mul
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; NO-PREFER-W-INST-LABEL: name: mul
+ ; NO-PREFER-W-INST: liveins: $x10, $x11
+ ; NO-PREFER-W-INST-NEXT: {{ $}}
+ ; NO-PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; NO-PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; NO-PREFER-W-INST-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[COPY]], [[COPY1]]
+ ; NO-PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[MUL]], 1
+ ; NO-PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; NO-PREFER-W-INST-NEXT: PseudoRET
+ ;
+ ; PREFER-W-INST-LABEL: name: mul
+ ; PREFER-W-INST: liveins: $x10, $x11
+ ; PREFER-W-INST-NEXT: {{ $}}
+ ; PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; PREFER-W-INST-NEXT: [[MULW:%[0-9]+]]:gpr = MULW [[COPY]], [[COPY1]]
+ ; PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[MULW]], 1
+ ; PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; PREFER-W-INST-NEXT: PseudoRET
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = MUL %1, %2
+ %4:gpr = ADDIW %3, 1
+ $x10 = COPY %4
+ PseudoRET
+...
+
+
+---
+name: slli_31
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; NO-PREFER-W-INST-LABEL: name: slli_31
+ ; NO-PREFER-W-INST: liveins: $x10, $x11
+ ; NO-PREFER-W-INST-NEXT: {{ $}}
+ ; NO-PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; NO-PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; NO-PREFER-W-INST-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 31
+ ; NO-PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[SLLI]], 1
+ ; NO-PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; NO-PREFER-W-INST-NEXT: PseudoRET
+ ;
+ ; PREFER-W-INST-LABEL: name: slli_31
+ ; PREFER-W-INST: liveins: $x10, $x11
+ ; PREFER-W-INST-NEXT: {{ $}}
+ ; PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; PREFER-W-INST-NEXT: [[SLLIW:%[0-9]+]]:gpr = SLLIW [[COPY]], 31
+ ; PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[SLLIW]], 1
+ ; PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; PREFER-W-INST-NEXT: PseudoRET
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SLLI %1, 31
+ %4:gpr = ADDIW %3, 1
+ $x10 = COPY %4
+ PseudoRET
+...
+
+---
+name: slli_32
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; NO-PREFER-W-INST-LABEL: name: slli_32
+ ; NO-PREFER-W-INST: liveins: $x10, $x11
+ ; NO-PREFER-W-INST-NEXT: {{ $}}
+ ; NO-PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; NO-PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; NO-PREFER-W-INST-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 32
+ ; NO-PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[SLLI]], 1
+ ; NO-PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; NO-PREFER-W-INST-NEXT: PseudoRET
+ ;
+ ; PREFER-W-INST-LABEL: name: slli_32
+ ; PREFER-W-INST: liveins: $x10, $x11
+ ; PREFER-W-INST-NEXT: {{ $}}
+ ; PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; PREFER-W-INST-NEXT: [[SLLI:%[0-9]+]]:gpr = SLLI [[COPY]], 32
+ ; PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[SLLI]], 1
+ ; PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; PREFER-W-INST-NEXT: PseudoRET
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SLLI %1, 32
+ %4:gpr = ADDIW %3, 1
+ $x10 = COPY %4
+ PseudoRET
+...
+
+---
+name: ld
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; NO-PREFER-W-INST-LABEL: name: ld
+ ; NO-PREFER-W-INST: liveins: $x10, $x11
+ ; NO-PREFER-W-INST-NEXT: {{ $}}
+ ; NO-PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; NO-PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; NO-PREFER-W-INST-NEXT: [[LD:%[0-9]+]]:gpr = LD [[COPY]], 0
+ ; NO-PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LD]], 1
+ ; NO-PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; NO-PREFER-W-INST-NEXT: PseudoRET
+ ;
+ ; PREFER-W-INST-LABEL: name: ld
+ ; PREFER-W-INST: liveins: $x10, $x11
+ ; PREFER-W-INST-NEXT: {{ $}}
+ ; PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; PREFER-W-INST-NEXT: [[LW:%[0-9]+]]:gpr = LW [[COPY]], 0
+ ; PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LW]], 1
+ ; PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; PREFER-W-INST-NEXT: PseudoRET
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = LD %1, 0
+ %4:gpr = ADDIW %3, 1
+ $x10 = COPY %4
+ PseudoRET
+...
+
+---
+name: lwu
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; NO-PREFER-W-INST-LABEL: name: lwu
+ ; NO-PREFER-W-INST: liveins: $x10, $x11
+ ; NO-PREFER-W-INST-NEXT: {{ $}}
+ ; NO-PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; NO-PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; NO-PREFER-W-INST-NEXT: [[LWU:%[0-9]+]]:gpr = LWU [[COPY]], 0
+ ; NO-PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LWU]], 1
+ ; NO-PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; NO-PREFER-W-INST-NEXT: PseudoRET
+ ;
+ ; PREFER-W-INST-LABEL: name: lwu
+ ; PREFER-W-INST: liveins: $x10, $x11
+ ; PREFER-W-INST-NEXT: {{ $}}
+ ; PREFER-W-INST-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
+ ; PREFER-W-INST-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x11
+ ; PREFER-W-INST-NEXT: [[LW:%[0-9]+]]:gpr = LW [[COPY]], 0
+ ; PREFER-W-INST-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW [[LW]], 1
+ ; PREFER-W-INST-NEXT: $x10 = COPY [[ADDIW]]
+ ; PREFER-W-INST-NEXT: PseudoRET
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = LWU %1, 0
+ %4:gpr = ADDIW %3, 1
+ $x10 = COPY %4
+ PseudoRET
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
index 78e8700..647d315 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv.ll
@@ -162,3 +162,206 @@ define void @caller_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>} %x)
}
declare void @callee_tuple_argument({<vscale x 4 x i32>, <vscale x 4 x i32>})
+
+; %0 -> v8
+; %1 -> v9
+define <vscale x 1 x i64> @case1(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1) {
+; CHECK-LABEL: case1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %a = add <vscale x 1 x i64> %0, %1
+ ret <vscale x 1 x i64> %a
+}
+
+; %0 -> v8
+; %1 -> v10-v11
+; %2 -> v9
+define <vscale x 1 x i64> @case2_1(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case2_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %a = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %a
+}
+define <vscale x 2 x i64> @case2_2(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case2_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v10
+; CHECK-NEXT: ret
+ %a = add <vscale x 2 x i64> %1, %1
+ ret <vscale x 2 x i64> %a
+}
+
+; %0 -> v8
+; %1 -> {v10-v11, v12-v13}
+; %2 -> v9
+define <vscale x 1 x i64> @case3_1(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case3_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %add = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %add
+}
+define <vscale x 2 x i64> @case3_2(<vscale x 1 x i64> %0, {<vscale x 2 x i64>, <vscale x 2 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case3_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT: vadd.vv v8, v10, v12
+; CHECK-NEXT: ret
+ %a = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 0
+ %b = extractvalue { <vscale x 2 x i64>, <vscale x 2 x i64> } %1, 1
+ %add = add <vscale x 2 x i64> %a, %b
+ ret <vscale x 2 x i64> %add
+}
+
+; %0 -> v8
+; %1 -> {by-ref, by-ref}
+; %2 -> v9
+define <vscale x 8 x i64> @case4_1(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case4_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: vl8re64.v v8, (a1)
+; CHECK-NEXT: vl8re64.v v16, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: ret
+ %a = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 0
+ %b = extractvalue { <vscale x 8 x i64>, <vscale x 8 x i64> } %1, 1
+ %add = add <vscale x 8 x i64> %a, %b
+ ret <vscale x 8 x i64> %add
+}
+define <vscale x 1 x i64> @case4_2(<vscale x 1 x i64> %0, {<vscale x 8 x i64>, <vscale x 8 x i64>} %1, <vscale x 1 x i64> %2) {
+; CHECK-LABEL: case4_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %add = add <vscale x 1 x i64> %0, %2
+ ret <vscale x 1 x i64> %add
+}
+
+declare <vscale x 1 x i64> @callee1()
+declare void @callee2(<vscale x 1 x i64>)
+declare void @callee3(<vscale x 4 x i32>)
+define void @caller() {
+; RV32-LABEL: caller:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call callee1
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32-NEXT: vadd.vv v8, v8, v8
+; RV32-NEXT: call callee2
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call callee1
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64-NEXT: vadd.vv v8, v8, v8
+; RV64-NEXT: call callee2
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = call <vscale x 1 x i64> @callee1()
+ %add = add <vscale x 1 x i64> %a, %a
+ call void @callee2(<vscale x 1 x i64> %add)
+ ret void
+}
+
+declare {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple()
+define void @caller_tuple() {
+; RV32-LABEL: caller_tuple:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call callee_tuple
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: call callee3
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_tuple:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call callee_tuple
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: call callee3
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @callee_tuple()
+ %b = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 0
+ %c = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %a, 1
+ %add = add <vscale x 4 x i32> %b, %c
+ call void @callee3(<vscale x 4 x i32> %add)
+ ret void
+}
+
+declare {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}} @callee_nested()
+define void @caller_nested() {
+; RV32-LABEL: caller_nested:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: call callee_nested
+; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: vadd.vv v8, v8, v12
+; RV32-NEXT: call callee3
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: caller_nested:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: call callee_nested
+; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: call callee3
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %a = call {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}} @callee_nested()
+ %b = extractvalue {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}} %a, 0
+ %c = extractvalue {<vscale x 4 x i32>, {<vscale x 4 x i32>, <vscale x 4 x i32>}} %a, 1
+ %c0 = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, 0
+ %c1 = extractvalue {<vscale x 4 x i32>, <vscale x 4 x i32>} %c, 1
+ %add0 = add <vscale x 4 x i32> %b, %c0
+ %add1 = add <vscale x 4 x i32> %add0, %c1
+ call void @callee3(<vscale x 4 x i32> %add1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 4332bf3..f3ce218 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -19,9 +19,10 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_load_nxv16i
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
; CHECK-NEXT: vnsrl.wi v8, v12, 0
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: vnsrl.wi v8, v12, 8
; CHECK-NEXT: vmsne.vi v8, v8, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: ret
%vec = load <vscale x 32 x i1>, ptr %p
%retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index ef4baf3..1347dfb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -8,18 +8,19 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv
; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a0
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
-; CHECK-NEXT: vmsne.vi v0, v12, 0
-; CHECK-NEXT: vnsrl.wi v12, v8, 8
-; CHECK-NEXT: vmsne.vi v8, v12, 0
+; CHECK-NEXT: vmerge.vim v14, v8, 1, v0
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vmsne.vi v8, v8, 0
+; CHECK-NEXT: vnsrl.wi v10, v12, 8
+; CHECK-NEXT: vmsne.vi v0, v10, 0
+; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: ret
%retval = call {<vscale x 16 x i1>, <vscale x 16 x i1>} @llvm.experimental.vector.deinterleave2.nxv32i1(<vscale x 32 x i1> %vec)
ret {<vscale x 16 x i1>, <vscale x 16 x i1>} %retval
@@ -107,7 +108,9 @@ define {<vscale x 64 x i1>, <vscale x 64 x i1>} @vector_deinterleave_nxv64i1_nxv
; CHECK-NEXT: vnsrl.wi v24, v16, 8
; CHECK-NEXT: vnsrl.wi v28, v8, 8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v8, v24, 0
+; CHECK-NEXT: vmsne.vi v16, v24, 0
+; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmv1r.v v9, v16
; CHECK-NEXT: ret
%retval = call {<vscale x 64 x i1>, <vscale x 64 x i1>} @llvm.experimental.vector.deinterleave2.nxv128i1(<vscale x 128 x i1> %vec)
ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
diff --git a/llvm/test/CodeGen/RISCV/strip-w-suffix.ll b/llvm/test/CodeGen/RISCV/strip-w-suffix.ll
deleted file mode 100644
index 4124b3d..0000000
--- a/llvm/test/CodeGen/RISCV/strip-w-suffix.ll
+++ /dev/null
@@ -1,74 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefixes=STRIP %s
-; RUN: llc -mtriple=riscv64 -mattr=+m,+no-strip-w-suffix -verify-machineinstrs < %s \
-; RUN: | FileCheck -check-prefixes=NO-STRIP %s
-
-define i32 @addiw(i32 %a) {
-; STRIP-LABEL: addiw:
-; STRIP: # %bb.0:
-; STRIP-NEXT: lui a1, 1
-; STRIP-NEXT: addi a1, a1, -1
-; STRIP-NEXT: addw a0, a0, a1
-; STRIP-NEXT: ret
-;
-; NO-STRIP-LABEL: addiw:
-; NO-STRIP: # %bb.0:
-; NO-STRIP-NEXT: lui a1, 1
-; NO-STRIP-NEXT: addiw a1, a1, -1
-; NO-STRIP-NEXT: addw a0, a0, a1
-; NO-STRIP-NEXT: ret
- %ret = add i32 %a, 4095
- ret i32 %ret
-}
-
-define i32 @addw(i32 %a, i32 %b) {
-; STRIP-LABEL: addw:
-; STRIP: # %bb.0:
-; STRIP-NEXT: add a0, a0, a1
-; STRIP-NEXT: addiw a0, a0, 1024
-; STRIP-NEXT: ret
-;
-; NO-STRIP-LABEL: addw:
-; NO-STRIP: # %bb.0:
-; NO-STRIP-NEXT: addw a0, a0, a1
-; NO-STRIP-NEXT: addiw a0, a0, 1024
-; NO-STRIP-NEXT: ret
- %add = add i32 %a, %b
- %ret = add i32 %add, 1024
- ret i32 %ret
-}
-
-define i32 @mulw(i32 %a, i32 %b) {
-; STRIP-LABEL: mulw:
-; STRIP: # %bb.0:
-; STRIP-NEXT: mul a0, a0, a1
-; STRIP-NEXT: addiw a0, a0, 1024
-; STRIP-NEXT: ret
-;
-; NO-STRIP-LABEL: mulw:
-; NO-STRIP: # %bb.0:
-; NO-STRIP-NEXT: mulw a0, a0, a1
-; NO-STRIP-NEXT: addiw a0, a0, 1024
-; NO-STRIP-NEXT: ret
- %mul = mul i32 %a, %b
- %ret = add i32 %mul, 1024
- ret i32 %ret
-}
-
-define i32 @slliw(i32 %a) {
-; STRIP-LABEL: slliw:
-; STRIP: # %bb.0:
-; STRIP-NEXT: slli a0, a0, 1
-; STRIP-NEXT: addiw a0, a0, 1024
-; STRIP-NEXT: ret
-;
-; NO-STRIP-LABEL: slliw:
-; NO-STRIP: # %bb.0:
-; NO-STRIP-NEXT: slliw a0, a0, 1
-; NO-STRIP-NEXT: addiw a0, a0, 1024
-; NO-STRIP-NEXT: ret
- %shl = shl i32 %a, 1
- %ret = add i32 %shl, 1024
- ret i32 %ret
-}
diff --git a/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll b/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
index a9b2037..8d2ee3c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
@@ -10,27 +10,22 @@ define void @test_float(ptr %a , float %b) {
; CHECK64_SMALL: # %bb.0: # %entry
; CHECK64_SMALL-NEXT: movss {{.*#+}} xmm1 = [5.5E+0,0.0E+0,0.0E+0,0.0E+0]
; CHECK64_SMALL-NEXT: addss %xmm0, %xmm1
-; CHECK64_SMALL-NEXT: movd %xmm1, %eax
-; CHECK64_SMALL-NEXT: movl %eax, (%rdi)
+; CHECK64_SMALL-NEXT: movss %xmm1, (%rdi)
; CHECK64_SMALL-NEXT: retq
;
; CHECK64_LARGE-LABEL: test_float:
; CHECK64_LARGE: # %bb.0: # %entry
; CHECK64_LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
; CHECK64_LARGE-NEXT: addss (%rax), %xmm0
-; CHECK64_LARGE-NEXT: movd %xmm0, %eax
-; CHECK64_LARGE-NEXT: movl %eax, (%rdi)
+; CHECK64_LARGE-NEXT: movss %xmm0, (%rdi)
; CHECK64_LARGE-NEXT: retq
;
; CHECK32-LABEL: test_float:
; CHECK32: # %bb.0: # %entry
; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK32-NEXT: movss {{.*#+}} xmm0 = [5.5E+0,0.0E+0,0.0E+0,0.0E+0]
-; CHECK32-NEXT: movd %ecx, %xmm1
-; CHECK32-NEXT: addss %xmm0, %xmm1
-; CHECK32-NEXT: movd %xmm1, %ecx
-; CHECK32-NEXT: movl %ecx, (%eax)
+; CHECK32-NEXT: addss {{[0-9]+}}(%esp), %xmm0
+; CHECK32-NEXT: movss %xmm0, (%eax)
; CHECK32-NEXT: retl
entry:
%aa = fadd float 5.500000e+00, %b
diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-sse-intrinsics.ll b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-sse-intrinsics.ll
new file mode 100644
index 0000000..3388af6
--- /dev/null
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-sse-intrinsics.ll
@@ -0,0 +1,153 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=i686-- -mattr=+sse -global-isel -stop-after=regbankselect | FileCheck %s
+
+define void @test_x86_sse_max_ps(ptr %p1, ptr %p2) {
+ ; CHECK-LABEL: name: test_x86_sse_max_ps
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD]](p0) :: (load (<4 x s32>) from %ir.p1)
+ ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD1]](p0) :: (load (<4 x s32>) from %ir.p2)
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:vecr(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.x86.sse.max.ps), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[INT]](<4 x s32>), [[LOAD]](p0) :: (store (<4 x s32>) into %ir.p1)
+ ; CHECK-NEXT: RET 0
+ %a0 = load <4 x float>, ptr %p1, align 16
+ %a1 = load <4 x float>, ptr %p2, align 16
+ %res = call <4 x float> @llvm.x86.sse.max.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
+ store <4 x float> %res, ptr %p1
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.max.ps(<4 x float>, <4 x float>) nounwind readnone
+
+
+define void @test_x86_sse_max_ss(ptr %p1, ptr %p2) {
+ ; CHECK-LABEL: name: test_x86_sse_max_ss
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD]](p0) :: (load (<4 x s32>) from %ir.p1)
+ ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD1]](p0) :: (load (<4 x s32>) from %ir.p2)
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:vecr(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.x86.sse.max.ss), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[INT]](<4 x s32>), [[LOAD]](p0) :: (store (<4 x s32>) into %ir.p1)
+ ; CHECK-NEXT: RET 0
+ %a0 = load <4 x float>, ptr %p1, align 16
+ %a1 = load <4 x float>, ptr %p2, align 16
+ %res = call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
+ store <4 x float> %res, ptr %p1
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.max.ss(<4 x float>, <4 x float>) nounwind readnone
+
+
+define void @test_x86_sse_min_ps(ptr %p1, ptr %p2) {
+ ; CHECK-LABEL: name: test_x86_sse_min_ps
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD]](p0) :: (load (<4 x s32>) from %ir.p1)
+ ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD1]](p0) :: (load (<4 x s32>) from %ir.p2)
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:vecr(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.x86.sse.min.ps), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[INT]](<4 x s32>), [[LOAD]](p0) :: (store (<4 x s32>) into %ir.p1)
+ ; CHECK-NEXT: RET 0
+ %a0 = load <4 x float>, ptr %p1, align 16
+ %a1 = load <4 x float>, ptr %p2, align 16
+ %res = call <4 x float> @llvm.x86.sse.min.ps(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
+ store <4 x float> %res, ptr %p1
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.min.ps(<4 x float>, <4 x float>) nounwind readnone
+
+
+define void @test_x86_sse_min_ss(ptr %p1, ptr %p2) {
+ ; CHECK-LABEL: name: test_x86_sse_min_ss
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+ ; CHECK-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD]](p0) :: (load (<4 x s32>) from %ir.p1)
+ ; CHECK-NEXT: [[LOAD3:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD1]](p0) :: (load (<4 x s32>) from %ir.p2)
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:vecr(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.x86.sse.min.ss), [[LOAD2]](<4 x s32>), [[LOAD3]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[INT]](<4 x s32>), [[LOAD]](p0) :: (store (<4 x s32>) into %ir.p1)
+ ; CHECK-NEXT: RET 0
+ %a0 = load <4 x float>, ptr %p1, align 16
+ %a1 = load <4 x float>, ptr %p2, align 16
+ %res = call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %a0, <4 x float> %a1) ; <<4 x float>> [#uses=1]
+ store <4 x float> %res, ptr %p1
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.min.ss(<4 x float>, <4 x float>) nounwind readnone
+
+
+define void @test_x86_sse_rcp_ps(ptr %p1, ptr %p2) {
+ ; CHECK-LABEL: name: test_x86_sse_rcp_ps
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD]](p0) :: (load (<4 x s32>) from %ir.p1)
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:vecr(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.x86.sse.rcp.ps), [[LOAD1]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[INT]](<4 x s32>), [[LOAD]](p0) :: (store (<4 x s32>) into %ir.p1)
+ ; CHECK-NEXT: RET 0
+ %a0 = load <4 x float>, ptr %p1, align 16
+ %res = call <4 x float> @llvm.x86.sse.rcp.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
+ store <4 x float> %res, ptr %p1
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.rcp.ps(<4 x float>) nounwind readnone
+
+
+define void @test_x86_sse_rcp_ss(ptr %p1, ptr %p2) {
+ ; CHECK-LABEL: name: test_x86_sse_rcp_ss
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD]](p0) :: (load (<4 x s32>) from %ir.p1)
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:vecr(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.x86.sse.rcp.ss), [[LOAD1]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[INT]](<4 x s32>), [[LOAD]](p0) :: (store (<4 x s32>) into %ir.p1)
+ ; CHECK-NEXT: RET 0
+ %a0 = load <4 x float>, ptr %p1, align 16
+ %res = call <4 x float> @llvm.x86.sse.rcp.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
+ store <4 x float> %res, ptr %p1
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
+
+
+define void @test_x86_sse_rsqrt_ps(ptr %p1, ptr %p2) {
+ ; CHECK-LABEL: name: test_x86_sse_rsqrt_ps
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD]](p0) :: (load (<4 x s32>) from %ir.p1)
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:vecr(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.x86.sse.rsqrt.ps), [[LOAD1]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[INT]](<4 x s32>), [[LOAD]](p0) :: (store (<4 x s32>) into %ir.p1)
+ ; CHECK-NEXT: RET 0
+ %a0 = load <4 x float>, ptr %p1, align 16
+ %res = call <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float> %a0) ; <<4 x float>> [#uses=1]
+ store <4 x float> %res, ptr %p1
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.rsqrt.ps(<4 x float>) nounwind readnone
+
+
+define void @test_x86_sse_rsqrt_ss(ptr %p1, ptr %p2) {
+ ; CHECK-LABEL: name: test_x86_sse_rsqrt_ss
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (p0) from %fixed-stack.1)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[LOAD]](p0) :: (load (<4 x s32>) from %ir.p1)
+ ; CHECK-NEXT: [[INT:%[0-9]+]]:vecr(<4 x s32>) = G_INTRINSIC intrinsic(@llvm.x86.sse.rsqrt.ss), [[LOAD1]](<4 x s32>)
+ ; CHECK-NEXT: G_STORE [[INT]](<4 x s32>), [[LOAD]](p0) :: (store (<4 x s32>) into %ir.p1)
+ ; CHECK-NEXT: RET 0
+ %a0 = load <4 x float>, ptr %p1, align 16
+ %res = call <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float> %a0) ; <<4 x float>> [#uses=1]
+ store <4 x float> %res, ptr %p1
+ ret void
+}
+declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll
index d09db0f..99d458a 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-x87.ll
@@ -142,7 +142,7 @@ define float @f4(float %val) {
; X86-LABEL: name: f4
; X86: bb.1 (%ir-block.0):
; X86-NEXT: [[FRAME_INDEX:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- ; X86-NEXT: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s32) from %fixed-stack.0)
+ ; X86-NEXT: [[LOAD:%[0-9]+]]:psr(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load (s32) from %fixed-stack.0)
; X86-NEXT: $fp0 = COPY [[LOAD]](s32)
; X86-NEXT: RET 0, implicit $fp0
;
@@ -187,13 +187,10 @@ define void @f5(ptr %a, ptr %b) {
; X64-NEXT: {{ $}}
; X64-NEXT: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; X64-NEXT: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
- ; X64-NEXT: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.a)
- ; X64-NEXT: [[LOAD1:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY1]](p0) :: (load (s64) from %ir.b)
- ; X64-NEXT: [[COPY2:%[0-9]+]]:psr(s64) = COPY [[LOAD]](s64)
- ; X64-NEXT: [[COPY3:%[0-9]+]]:psr(s64) = COPY [[LOAD1]](s64)
- ; X64-NEXT: [[FADD:%[0-9]+]]:psr(s64) = G_FADD [[COPY2]], [[COPY3]]
- ; X64-NEXT: [[COPY4:%[0-9]+]]:gpr(s64) = COPY [[FADD]](s64)
- ; X64-NEXT: G_STORE [[COPY4]](s64), [[COPY]](p0) :: (store (s64) into %ir.a)
+ ; X64-NEXT: [[LOAD:%[0-9]+]]:psr(s64) = G_LOAD [[COPY]](p0) :: (load (s64) from %ir.a)
+ ; X64-NEXT: [[LOAD1:%[0-9]+]]:psr(s64) = G_LOAD [[COPY1]](p0) :: (load (s64) from %ir.b)
+ ; X64-NEXT: [[FADD:%[0-9]+]]:psr(s64) = G_FADD [[LOAD]], [[LOAD1]]
+ ; X64-NEXT: G_STORE [[FADD]](s64), [[COPY]](p0) :: (store (s64) into %ir.a)
; X64-NEXT: RET 0
%load1 = load double, ptr %a, align 8
%load2 = load double, ptr %b, align 8
@@ -210,11 +207,9 @@ define void @f6(ptr %0, ptr %1) {
; X86-NEXT: [[FRAME_INDEX1:%[0-9]+]]:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
; X86-NEXT: [[LOAD1:%[0-9]+]]:gpr(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load (p0) from %fixed-stack.0)
; X86-NEXT: [[C:%[0-9]+]]:psr(s32) = G_FCONSTANT float 2.000000e+01
- ; X86-NEXT: [[LOAD2:%[0-9]+]]:gpr(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.0)
- ; X86-NEXT: [[COPY:%[0-9]+]]:psr(s32) = COPY [[LOAD2]](s32)
- ; X86-NEXT: [[FADD:%[0-9]+]]:psr(s32) = G_FADD [[COPY]], [[C]]
- ; X86-NEXT: [[COPY1:%[0-9]+]]:gpr(s32) = COPY [[FADD]](s32)
- ; X86-NEXT: G_STORE [[COPY1]](s32), [[LOAD1]](p0) :: (store (s32) into %ir.1)
+ ; X86-NEXT: [[LOAD2:%[0-9]+]]:psr(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.0)
+ ; X86-NEXT: [[FADD:%[0-9]+]]:psr(s32) = G_FADD [[LOAD2]], [[C]]
+ ; X86-NEXT: G_STORE [[FADD]](s32), [[LOAD1]](p0) :: (store (s32) into %ir.1)
; X86-NEXT: RET 0
;
; X64-LABEL: name: f6
@@ -224,11 +219,9 @@ define void @f6(ptr %0, ptr %1) {
; X64-NEXT: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; X64-NEXT: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; X64-NEXT: [[C:%[0-9]+]]:psr(s32) = G_FCONSTANT float 2.000000e+01
- ; X64-NEXT: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.0)
- ; X64-NEXT: [[COPY2:%[0-9]+]]:psr(s32) = COPY [[LOAD]](s32)
- ; X64-NEXT: [[FADD:%[0-9]+]]:psr(s32) = G_FADD [[COPY2]], [[C]]
- ; X64-NEXT: [[COPY3:%[0-9]+]]:gpr(s32) = COPY [[FADD]](s32)
- ; X64-NEXT: G_STORE [[COPY3]](s32), [[COPY1]](p0) :: (store (s32) into %ir.1)
+ ; X64-NEXT: [[LOAD:%[0-9]+]]:psr(s32) = G_LOAD [[COPY]](p0) :: (load (s32) from %ir.0)
+ ; X64-NEXT: [[FADD:%[0-9]+]]:psr(s32) = G_FADD [[LOAD]], [[C]]
+ ; X64-NEXT: G_STORE [[FADD]](s32), [[COPY1]](p0) :: (store (s32) into %ir.1)
; X64-NEXT: RET 0
%load1 = load float, ptr %0
%add = fadd float %load1, 20.0
diff --git a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
index 4242d84..39c7ce1 100644
--- a/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/any_extend_vector_inreg_of_broadcast.ll
@@ -314,8 +314,8 @@ define void @vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2(ptr %in.v
;
; AVX512F-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
@@ -324,8 +324,8 @@ define void @vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2(ptr %in.v
;
; AVX512DQ-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx)
@@ -981,7 +981,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; AVX512F-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,5,0,7]
; AVX512F-NEXT: vmovdqa (%rdi), %ymm1
; AVX512F-NEXT: vpaddb (%rsi), %ymm1, %ymm1
-; AVX512F-NEXT: vpermd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpermd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
@@ -992,7 +992,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,5,0,7]
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
; AVX512DQ-NEXT: vpaddb (%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT: vpermd %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpermd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512DQ-NEXT: vzeroupper
@@ -3507,13 +3507,12 @@ define void @vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12(ptr %i
;
; AVX512F-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1
; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
-; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm2
-; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7],ymm2[8],ymm1[9],ymm2[10],ymm1[11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
+; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX512F-NEXT: vpaddb (%rdx), %ymm1, %ymm1
; AVX512F-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, 32(%rcx)
@@ -3523,13 +3522,12 @@ define void @vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12(ptr %i
;
; AVX512DQ-LABEL: vec384_i16_widen_to_i32_factor2_broadcast_to_v12i32_factor12:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1
; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
-; AVX512DQ-NEXT: vpbroadcastw %xmm0, %ymm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7],ymm2[8],ymm1[9],ymm2[10],ymm1[11],ymm2[12],ymm1[13],ymm2[14],ymm1[15]
+; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512DQ-NEXT: vpbroadcastw %xmm0, %ymm0
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
; AVX512DQ-NEXT: vpaddb (%rdx), %ymm1, %ymm1
; AVX512DQ-NEXT: vpaddb 32(%rdx), %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, 32(%rcx)
@@ -3768,10 +3766,10 @@ define void @vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6(ptr %in.
;
; AVX512F-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1
; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
+; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpbroadcastq %xmm0, %ymm2
; AVX512F-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0
@@ -3784,10 +3782,10 @@ define void @vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6(ptr %in.
;
; AVX512DQ-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1
; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
+; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512DQ-NEXT: vpbroadcastq %xmm0, %ymm2
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
; AVX512DQ-NEXT: vpbroadcastw %xmm0, %ymm0
@@ -4147,9 +4145,9 @@ define void @vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2(ptr %
;
; AVX512F-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX512F-NEXT: vpbroadcastw %xmm0, %xmm0
@@ -4161,9 +4159,9 @@ define void @vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2(ptr %
;
; AVX512DQ-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX512DQ-NEXT: vpbroadcastw %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/dpbusd.ll b/llvm/test/CodeGen/X86/dpbusd.ll
index fbea08e..04d7a96 100644
--- a/llvm/test/CodeGen/X86/dpbusd.ll
+++ b/llvm/test/CodeGen/X86/dpbusd.ll
@@ -26,7 +26,7 @@ define i32 @no_dpbusd(ptr%a, ptr%b, i32 %c, i32 %n) {
; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
; AVX512-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
diff --git a/llvm/test/CodeGen/X86/dpbusd_i4.ll b/llvm/test/CodeGen/X86/dpbusd_i4.ll
index 906fead..a212f99 100644
--- a/llvm/test/CodeGen/X86/dpbusd_i4.ll
+++ b/llvm/test/CodeGen/X86/dpbusd_i4.ll
@@ -86,7 +86,7 @@ define i32 @mul_sext_i4i4(<16 x i4> %a, <16 x i4> %b, i32 %c) {
; CHECK-NEXT: vpsraw $12, %ymm0, %ymm0
; CHECK-NEXT: vpmaddwd %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1
-; CHECK-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
index 1436922..6d5fc9e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -1828,22 +1828,22 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512-LABEL: load_i16_stride3_vf32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX512-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX512-NEXT: vmovdqa 160(%rdi), %ymm6
-; AVX512-NEXT: vmovdqa %ymm1, %ymm0
-; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm0
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7],ymm0[8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14],ymm2[15]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512-NEXT: vmovdqa 112(%rdi), %xmm0
+; AVX512-NEXT: vmovdqa %ymm0, %ymm1
+; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512-NEXT: vmovdqa 112(%rdi), %xmm1
; AVX512-NEXT: vmovdqa 96(%rdi), %xmm2
-; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1],xmm2[2,3],xmm0[4],xmm2[5,6],xmm0[7]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0,1,2],ymm3[3,4,5,6,7]
; AVX512-NEXT: vmovdqa (%rdi), %ymm8
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512-NEXT: vmovdqa %ymm1, %ymm3
+; AVX512-NEXT: vmovdqa %ymm0, %ymm3
; AVX512-NEXT: vpternlogq $202, %ymm9, %ymm8, %ymm3
; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
@@ -1857,14 +1857,14 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,4,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
-; AVX512-NEXT: vmovdqa %ymm1, %ymm10
+; AVX512-NEXT: vmovdqa %ymm0, %ymm10
; AVX512-NEXT: vpternlogq $202, %ymm6, %ymm5, %ymm10
; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
; AVX512-NEXT: vpshufb %ymm11, %ymm10, %ymm10
-; AVX512-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
-; AVX512-NEXT: vpshufb %xmm11, %xmm12, %xmm12
+; AVX512-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
; AVX512-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
@@ -1885,21 +1885,19 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7],ymm5[8],ymm12[9,10],ymm5[11],ymm12[12,13],ymm5[14],ymm12[15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
; AVX512-NEXT: vpshufb %ymm6, %ymm5, %ymm5
-; AVX512-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm1
-; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm1[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0],ymm1[1,2],ymm8[3],ymm1[4,5],ymm8[6],ymm1[7],ymm8[8],ymm1[9,10],ymm8[11],ymm1[12,13],ymm8[14],ymm1[15]
-; AVX512-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX512-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
-; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm0
-; AVX512-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7],ymm2[8],ymm0[9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
+; AVX512-NEXT: vpshufb %ymm6, %ymm0, %ymm0
+; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512-NEXT: vpshufb %xmm6, %xmm2, %xmm2
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-NEXT: vmovdqa64 %zmm7, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm10, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm0, (%rcx)
@@ -1908,22 +1906,22 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512-FCP-LABEL: load_i16_stride3_vf32:
; AVX512-FCP: # %bb.0:
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX512-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
-; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm0
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm0
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7],ymm0[8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14],ymm2[15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512-FCP-NEXT: vmovdqa 112(%rdi), %xmm0
+; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512-FCP-NEXT: vmovdqa 112(%rdi), %xmm1
; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %xmm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1],xmm2[2,3],xmm0[4],xmm2[5,6],xmm0[7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0,1,2],ymm3[3,4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm8
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm3
+; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm3
; AVX512-FCP-NEXT: vpternlogq $202, %ymm9, %ymm8, %ymm3
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
@@ -1937,14 +1935,14 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,4,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
-; AVX512-FCP-NEXT: vmovdqa %ymm1, %ymm10
+; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm10
; AVX512-FCP-NEXT: vpternlogq $202, %ymm6, %ymm5, %ymm10
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm10, %ymm10
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm11, %xmm12, %xmm12
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
@@ -1965,21 +1963,19 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7],ymm5[8],ymm12[9,10],ymm5[11],ymm12[12,13],ymm5[14],ymm12[15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm1[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0],ymm1[1,2],ymm8[3],ymm1[4,5],ymm8[6],ymm1[7],ymm8[8],ymm1[9,10],ymm8[11],ymm1[12,13],ymm8[14],ymm1[15]
-; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512-FCP-NEXT: vextracti32x4 $2, %zmm0, %xmm0
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7],ymm2[8],ymm0[9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
+; AVX512-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512-FCP-NEXT: vmovdqa64 %zmm7, (%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm10, (%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, (%rcx)
@@ -1988,22 +1984,22 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512DQ-LABEL: load_i16_stride3_vf32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX512DQ-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm6
-; AVX512DQ-NEXT: vmovdqa %ymm1, %ymm0
-; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm0
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7],ymm0[8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14],ymm2[15]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512DQ-NEXT: vmovdqa 112(%rdi), %xmm0
+; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
+; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512DQ-NEXT: vmovdqa 112(%rdi), %xmm1
; AVX512DQ-NEXT: vmovdqa 96(%rdi), %xmm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1],xmm2[2,3],xmm0[4],xmm2[5,6],xmm0[7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0,1,2],ymm3[3,4,5,6,7]
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm8
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512DQ-NEXT: vmovdqa %ymm1, %ymm3
+; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm3
; AVX512DQ-NEXT: vpternlogq $202, %ymm9, %ymm8, %ymm3
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
@@ -2017,14 +2013,14 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,4,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
-; AVX512DQ-NEXT: vmovdqa %ymm1, %ymm10
+; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm10
; AVX512DQ-NEXT: vpternlogq $202, %ymm6, %ymm5, %ymm10
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
; AVX512DQ-NEXT: vpshufb %ymm11, %ymm10, %ymm10
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
-; AVX512DQ-NEXT: vpshufb %xmm11, %xmm12, %xmm12
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
@@ -2045,21 +2041,19 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7],ymm5[8],ymm12[9,10],ymm5[11],ymm12[12,13],ymm5[14],ymm12[15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
; AVX512DQ-NEXT: vpshufb %ymm6, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm1[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0],ymm1[1,2],ymm8[3],ymm1[4,5],ymm8[6],ymm1[7],ymm8[8],ymm1[9,10],ymm8[11],ymm1[12,13],ymm8[14],ymm1[15]
-; AVX512DQ-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: vextracti32x4 $2, %zmm0, %xmm0
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7],ymm2[8],ymm0[9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
+; AVX512DQ-NEXT: vpshufb %ymm6, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm2, %xmm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-NEXT: vmovdqa64 %zmm7, (%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm10, (%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%rcx)
@@ -2068,22 +2062,22 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
;
; AVX512DQ-FCP-LABEL: load_i16_stride3_vf32:
; AVX512DQ-FCP: # %bb.0:
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm1 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
; AVX512DQ-FCP-NEXT: vmovdqa 128(%rdi), %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm6
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm0
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm0
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3],ymm2[4],ymm0[5,6],ymm2[7],ymm0[8],ymm2[9],ymm0[10,11],ymm2[12],ymm0[13,14],ymm2[15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vmovdqa 112(%rdi), %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm6, %ymm1
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vmovdqa 112(%rdi), %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm0[1],xmm2[2,3],xmm0[4],xmm2[5,6],xmm0[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,6,7,12,13,2,3,8,9,14,15,u,u,u,u]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm4[0,1,2],ymm3[3,4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm8
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm9
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm3
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm3
; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm9, %ymm8, %ymm3
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm3[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7],ymm3[8],ymm4[9],ymm3[10,11],ymm4[12],ymm3[13,14],ymm4[15]
@@ -2097,14 +2091,14 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,6,5,4,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm10, %zmm7
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm1, %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm10
; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm6, %ymm5, %ymm10
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm11 = ymm10[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0,1],ymm11[2],ymm10[3,4],ymm11[5],ymm10[6,7,8,9],ymm11[10],ymm10[11,12],ymm11[13],ymm10[14,15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm10, %ymm10
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm0[2],xmm2[3,4],xmm0[5],xmm2[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm11, %xmm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm12[0,1,2,3,4],xmm10[5,6,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm12 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
@@ -2125,21 +2119,19 @@ define void @load_i16_stride3_vf32(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1,2],ymm5[3],ymm12[4,5],ymm5[6],ymm12[7],ymm5[8],ymm12[9,10],ymm5[11],ymm12[12,13],ymm5[14],ymm12[15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm6 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm5, %ymm5
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm1[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm8[0],ymm1[1,2],ymm8[3],ymm1[4,5],ymm8[6],ymm1[7],ymm8[8],ymm1[9,10],ymm8[11],ymm1[12,13],ymm8[14],ymm1[15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4],ymm3[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3,4],xmm2[5],xmm0[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
-; AVX512DQ-FCP-NEXT: vextracti32x4 $2, %zmm0, %xmm0
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm5[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm5[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4],xmm2[5],xmm1[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm5[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm8, %ymm9, %ymm0
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm2[0],ymm0[1,2],ymm2[3],ymm0[4,5],ymm2[6],ymm0[7],ymm2[8],ymm0[9,10],ymm2[11],ymm0[12,13],ymm2[14],ymm0[15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm6, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0],xmm3[1],xmm4[2,3],xmm3[4],xmm4[5,6],xmm3[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm2[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm7, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm10, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, (%rcx)
@@ -3500,688 +3492,668 @@ define void @load_i16_stride3_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, pt
; AVX512-LABEL: load_i16_stride3_vf64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512-NEXT: vmovdqa64 224(%rdi), %ymm20
-; AVX512-NEXT: vmovdqa64 192(%rdi), %ymm21
+; AVX512-NEXT: vmovdqa64 224(%rdi), %ymm18
+; AVX512-NEXT: vmovdqa64 192(%rdi), %ymm20
; AVX512-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm20, %ymm21, %ymm1
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14],ymm3[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm5
-; AVX512-NEXT: vmovdqa 272(%rdi), %xmm8
+; AVX512-NEXT: vpternlogq $202, %ymm18, %ymm20, %ymm1
+; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
+; AVX512-NEXT: vpshufb %ymm7, %ymm2, %ymm5
+; AVX512-NEXT: vmovdqa 272(%rdi), %xmm1
; AVX512-NEXT: vmovdqa 256(%rdi), %xmm2
-; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm8[2],xmm2[3,4],xmm8[5],xmm2[6,7]
-; AVX512-NEXT: vmovdqa %xmm2, %xmm14
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm9 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
-; AVX512-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512-NEXT: vmovdqa %xmm2, %xmm3
+; AVX512-NEXT: vmovdqa64 %xmm1, %xmm19
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm13 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
+; AVX512-NEXT: vpshufb %xmm13, %xmm6, %xmm6
; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX512-NEXT: vpblendw {{.*#+}} ymm6 = ymm5[0,1,2],ymm6[3,4,5,6,7],ymm5[8,9,10],ymm6[11,12,13,14,15]
; AVX512-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512-NEXT: vmovdqa64 320(%rdi), %ymm22
-; AVX512-NEXT: vmovdqa64 352(%rdi), %ymm23
-; AVX512-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512-NEXT: vpternlogq $202, %ymm22, %ymm23, %ymm6
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7],ymm6[8],ymm7[9],ymm6[10,11],ymm7[12],ymm6[13,14],ymm7[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512-NEXT: vpshufb %ymm11, %ymm6, %ymm12
+; AVX512-NEXT: vmovdqa64 320(%rdi), %ymm21
+; AVX512-NEXT: vmovdqa64 352(%rdi), %ymm22
+; AVX512-NEXT: vmovdqa %ymm0, %ymm8
+; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm8
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13,14],ymm9[15]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512-NEXT: vpshufb %ymm10, %ymm8, %ymm11
; AVX512-NEXT: vmovdqa 304(%rdi), %xmm1
; AVX512-NEXT: vmovdqa 288(%rdi), %xmm2
-; AVX512-NEXT: vpblendw {{.*#+}} xmm13 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX512-NEXT: vmovdqa %xmm2, %xmm4
-; AVX512-NEXT: vmovdqa %xmm1, %xmm6
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
-; AVX512-NEXT: vpshufb %xmm15, %xmm13, %xmm13
-; AVX512-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm5, %zmm16
-; AVX512-NEXT: vmovdqa64 128(%rdi), %ymm24
-; AVX512-NEXT: vmovdqa 160(%rdi), %ymm13
+; AVX512-NEXT: vmovdqa %xmm1, %xmm8
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
+; AVX512-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm16
+; AVX512-NEXT: vmovdqa64 128(%rdi), %ymm23
+; AVX512-NEXT: vmovdqa 160(%rdi), %ymm11
; AVX512-NEXT: vmovdqa %ymm0, %ymm5
-; AVX512-NEXT: vpternlogq $202, %ymm24, %ymm13, %ymm5
+; AVX512-NEXT: vpternlogq $202, %ymm23, %ymm11, %ymm5
; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
; AVX512-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
-; AVX512-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512-NEXT: vmovdqa 112(%rdi), %xmm11
-; AVX512-NEXT: vmovdqa 96(%rdi), %xmm12
-; AVX512-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6],xmm11[7]
-; AVX512-NEXT: vpshufb %xmm15, %xmm10, %xmm10
-; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm10[0,1,2],ymm5[3,4,5,6,7]
-; AVX512-NEXT: vmovdqa64 (%rdi), %ymm17
-; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512-NEXT: vpshufb %ymm10, %ymm5, %ymm10
+; AVX512-NEXT: vmovdqa 112(%rdi), %xmm15
+; AVX512-NEXT: vmovdqa 96(%rdi), %xmm5
+; AVX512-NEXT: vpblendw {{.*#+}} xmm12 = xmm5[0],xmm15[1],xmm5[2,3],xmm15[4],xmm5[5,6],xmm15[7]
+; AVX512-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0,1,2],ymm10[3,4,5,6,7]
+; AVX512-NEXT: vmovdqa64 (%rdi), %ymm24
+; AVX512-NEXT: vmovdqa 32(%rdi), %ymm12
; AVX512-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512-NEXT: vpternlogq $202, %ymm5, %ymm17, %ymm10
-; AVX512-NEXT: vpermq {{.*#+}} ymm15 = ymm10[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0],ymm15[1],ymm10[2,3],ymm15[4],ymm10[5,6],ymm15[7],ymm10[8],ymm15[9],ymm10[10,11],ymm15[12],ymm10[13,14],ymm15[15]
-; AVX512-NEXT: vpshufb %ymm3, %ymm10, %ymm2
+; AVX512-NEXT: vpternlogq $202, %ymm12, %ymm24, %ymm10
+; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm10[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1],ymm10[2,3],ymm1[4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9],ymm10[10,11],ymm1[12],ymm10[13,14],ymm1[15]
+; AVX512-NEXT: vpshufb %ymm7, %ymm1, %ymm7
; AVX512-NEXT: vmovdqa 80(%rdi), %xmm10
-; AVX512-NEXT: vmovdqa 64(%rdi), %xmm15
-; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm15[0,1],xmm10[2],xmm15[3,4],xmm10[5],xmm15[6,7]
-; AVX512-NEXT: vpshufb %xmm9, %xmm3, %xmm3
-; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm18
-; AVX512-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm23, %ymm22, %ymm1
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
-; AVX512-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512-NEXT: vmovdqa64 %ymm2, %ymm28
-; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
-; AVX512-NEXT: vmovdqa64 %xmm6, %xmm25
+; AVX512-NEXT: vmovdqa 64(%rdi), %xmm1
+; AVX512-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0,1],xmm10[2],xmm1[3,4],xmm10[5],xmm1[6,7]
+; AVX512-NEXT: vpshufb %xmm13, %xmm2, %xmm2
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3,4,5,6,7],ymm7[8,9,10],ymm2[11,12,13,14,15]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,4,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm17
+; AVX512-NEXT: vmovdqa %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm2
+; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7,8,9],ymm6[10],ymm2[11,12],ymm6[13],ymm2[14,15]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
+; AVX512-NEXT: vpshufb %ymm9, %ymm2, %ymm2
+; AVX512-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0,1],xmm8[2],xmm4[3,4],xmm8[5],xmm4[6,7]
+; AVX512-NEXT: vmovdqa64 %xmm8, %xmm25
; AVX512-NEXT: vmovdqa64 %xmm4, %xmm26
; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
-; AVX512-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512-NEXT: vmovdqa %ymm9, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm20, %ymm1
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
+; AVX512-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX512-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm2[5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512-NEXT: vmovdqa %ymm13, %ymm2
+; AVX512-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm2
+; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX512-NEXT: vmovdqa %xmm14, %xmm7
-; AVX512-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm14[2],xmm8[3,4],xmm14[5],xmm8[6,7]
-; AVX512-NEXT: vmovdqa64 %xmm8, %xmm27
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
-; AVX512-NEXT: vpshufb %xmm2, %xmm14, %xmm14
-; AVX512-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512-NEXT: vpblendw {{.*#+}} ymm14 = ymm1[0,1,2],ymm14[3,4,5,6,7],ymm1[8,9,10],ymm14[11,12,13,14,15]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm19
-; AVX512-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-NEXT: vpternlogq $202, %ymm13, %ymm24, %ymm1
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512-NEXT: vmovdqa64 %ymm28, %ymm3
-; AVX512-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
-; AVX512-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-NEXT: vmovdqa %ymm9, %ymm3
-; AVX512-NEXT: vpternlogq $202, %ymm17, %ymm5, %ymm3
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
-; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm10[0,1],xmm15[2],xmm10[3,4],xmm15[5],xmm10[6,7]
-; AVX512-NEXT: vpshufb %xmm2, %xmm4, %xmm2
-; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,7,4]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-NEXT: vpternlogq $226, %ymm24, %ymm9, %ymm13
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm13[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7],ymm2[8],ymm13[9,10],ymm2[11],ymm13[12,13],ymm2[14],ymm13[15]
-; AVX512-NEXT: vpternlogq $226, %ymm17, %ymm0, %ymm5
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm5[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1,2],ymm3[3],ymm5[4,5],ymm3[6],ymm5[7],ymm3[8],ymm5[9,10],ymm3[11],ymm5[12,13],ymm3[14],ymm5[15]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm4 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
-; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm15[0],xmm10[1],xmm15[2,3],xmm10[4],xmm15[5,6],xmm10[7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512-NEXT: vpshufb %xmm6, %xmm5, %xmm5
-; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
; AVX512-NEXT: vpshufb %ymm4, %ymm2, %ymm2
-; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm11[0,1],xmm12[2],xmm11[3,4],xmm12[5],xmm11[6,7]
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512-NEXT: vpshufb %xmm8, %xmm5, %xmm5
-; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm5
-; AVX512-NEXT: vextracti32x4 $2, %zmm5, %xmm5
-; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm2[5,6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-NEXT: vmovdqa64 %xmm19, %xmm8
+; AVX512-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm3[2],xmm8[3,4],xmm3[5],xmm8[6,7]
+; AVX512-NEXT: vmovdqa64 %xmm3, %xmm27
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
+; AVX512-NEXT: vpshufb %xmm3, %xmm14, %xmm14
+; AVX512-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512-NEXT: vpblendw {{.*#+}} ymm14 = ymm2[0,1,2],ymm14[3,4,5,6,7],ymm2[8,9,10],ymm14[11,12,13,14,15]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,6,7,4]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm19
+; AVX512-NEXT: vmovdqa %ymm0, %ymm2
+; AVX512-NEXT: vpternlogq $202, %ymm11, %ymm23, %ymm2
+; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7,8,9],ymm7[10],ymm2[11,12],ymm7[13],ymm2[14,15]
+; AVX512-NEXT: vpshufb %ymm9, %ymm2, %ymm2
+; AVX512-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0,1],xmm15[2],xmm5[3,4],xmm15[5],xmm5[6,7]
+; AVX512-NEXT: vpshufb %xmm6, %xmm7, %xmm6
+; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm2[5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-NEXT: vmovdqa %ymm13, %ymm6
+; AVX512-NEXT: vpternlogq $202, %ymm24, %ymm12, %ymm6
+; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
+; AVX512-NEXT: vpshufb %ymm4, %ymm6, %ymm4
+; AVX512-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0,1],xmm1[2],xmm10[3,4],xmm1[5],xmm10[6,7]
+; AVX512-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512-NEXT: vpternlogq $202, %ymm22, %ymm23, %ymm9
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm9[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm9[1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7],ymm3[8],ymm9[9,10],ymm3[11],ymm9[12,13],ymm3[14],ymm9[15]
-; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm20, %ymm0
-; AVX512-NEXT: vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7],ymm5[8],ymm0[9,10],ymm5[11],ymm0[12,13],ymm5[14],ymm0[15]
-; AVX512-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512-NEXT: vpshufb %ymm4, %ymm0, %ymm0
+; AVX512-NEXT: vpternlogq $226, %ymm23, %ymm13, %ymm11
+; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm11[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm11[1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7],ymm3[8],ymm11[9,10],ymm3[11],ymm11[12,13],ymm3[14],ymm11[15]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
+; AVX512-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm15[0,1],xmm5[2],xmm15[3,4],xmm5[5],xmm15[6,7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512-NEXT: vpternlogq $226, %ymm24, %ymm0, %ymm12
+; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm12[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7],ymm4[8],ymm12[9,10],ymm4[11],ymm12[12,13],ymm4[14],ymm12[15]
+; AVX512-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX512-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm10[1],xmm1[2,3],xmm10[4],xmm1[5,6],xmm10[7]
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX512-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
+; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm13
+; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm13[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm13[1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7],ymm3[8],ymm13[9,10],ymm3[11],ymm13[12,13],ymm3[14],ymm13[15]
+; AVX512-NEXT: vmovdqa64 %xmm25, %xmm4
+; AVX512-NEXT: vmovdqa64 %xmm26, %xmm7
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3,4],xmm7[5],xmm4[6,7]
+; AVX512-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX512-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm0
+; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
+; AVX512-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7],ymm4[8],ymm0[9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
+; AVX512-NEXT: vpshufb %ymm11, %ymm0, %ymm0
; AVX512-NEXT: vmovdqa64 %xmm27, %xmm4
-; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
+; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2,3],xmm8[4],xmm4[5,6],xmm8[7]
; AVX512-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
-; AVX512-NEXT: vmovdqa64 %xmm25, %xmm4
-; AVX512-NEXT: vmovdqa64 %xmm26, %xmm5
-; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX512-NEXT: vpshufb %xmm8, %xmm4, %xmm4
-; AVX512-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512-NEXT: vextracti32x4 $2, %zmm4, %xmm4
-; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
-; AVX512-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512-NEXT: vmovdqa64 %zmm18, (%rsi)
+; AVX512-NEXT: vmovdqa64 %zmm17, (%rsi)
; AVX512-NEXT: vmovdqa64 %zmm16, 64(%rsi)
; AVX512-NEXT: vmovdqa64 %zmm19, 64(%rdx)
-; AVX512-NEXT: vmovdqa64 %zmm1, (%rdx)
+; AVX512-NEXT: vmovdqa64 %zmm2, (%rdx)
; AVX512-NEXT: vmovdqa64 %zmm0, 64(%rcx)
-; AVX512-NEXT: vmovdqa64 %zmm2, (%rcx)
+; AVX512-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: load_i16_stride3_vf64:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512-FCP-NEXT: vmovdqa64 224(%rdi), %ymm20
-; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm21
+; AVX512-FCP-NEXT: vmovdqa64 224(%rdi), %ymm18
+; AVX512-FCP-NEXT: vmovdqa64 192(%rdi), %ymm20
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm20, %ymm21, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14],ymm3[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm5
-; AVX512-FCP-NEXT: vmovdqa 272(%rdi), %xmm8
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm18, %ymm20, %ymm1
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm5
+; AVX512-FCP-NEXT: vmovdqa 272(%rdi), %xmm1
; AVX512-FCP-NEXT: vmovdqa 256(%rdi), %xmm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm8[2],xmm2[3,4],xmm8[5],xmm2[6,7]
-; AVX512-FCP-NEXT: vmovdqa %xmm2, %xmm14
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512-FCP-NEXT: vmovdqa %xmm2, %xmm3
+; AVX512-FCP-NEXT: vmovdqa64 %xmm1, %xmm19
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm6
; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm5[0,1,2],ymm6[3,4,5,6,7],ymm5[8,9,10],ymm6[11,12,13,14,15]
; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %ymm22
-; AVX512-FCP-NEXT: vmovdqa64 352(%rdi), %ymm23
-; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm22, %ymm23, %ymm6
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7],ymm6[8],ymm7[9],ymm6[10,11],ymm7[12],ymm6[13,14],ymm7[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm12
+; AVX512-FCP-NEXT: vmovdqa64 320(%rdi), %ymm21
+; AVX512-FCP-NEXT: vmovdqa64 352(%rdi), %ymm22
+; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm8
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm8
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13,14],ymm9[15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm11
; AVX512-FCP-NEXT: vmovdqa 304(%rdi), %xmm1
; AVX512-FCP-NEXT: vmovdqa 288(%rdi), %xmm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX512-FCP-NEXT: vmovdqa %xmm2, %xmm4
-; AVX512-FCP-NEXT: vmovdqa %xmm1, %xmm6
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm13, %xmm13
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm5, %zmm16
-; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %ymm24
-; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm13
+; AVX512-FCP-NEXT: vmovdqa %xmm1, %xmm8
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm16
+; AVX512-FCP-NEXT: vmovdqa64 128(%rdi), %ymm23
+; AVX512-FCP-NEXT: vmovdqa 160(%rdi), %ymm11
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm13, %ymm5
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm23, %ymm11, %ymm5
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
-; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512-FCP-NEXT: vmovdqa 112(%rdi), %xmm11
-; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %xmm12
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6],xmm11[7]
-; AVX512-FCP-NEXT: vpshufb %xmm15, %xmm10, %xmm10
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm10[0,1,2],ymm5[3,4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %ymm17
-; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm10
+; AVX512-FCP-NEXT: vmovdqa 112(%rdi), %xmm15
+; AVX512-FCP-NEXT: vmovdqa 96(%rdi), %xmm5
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm5[0],xmm15[1],xmm5[2,3],xmm15[4],xmm5[5,6],xmm15[7]
+; AVX512-FCP-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0,1,2],ymm10[3,4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 (%rdi), %ymm24
+; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm12
; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm5, %ymm17, %ymm10
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm10[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0],ymm15[1],ymm10[2,3],ymm15[4],ymm10[5,6],ymm15[7],ymm10[8],ymm15[9],ymm10[10,11],ymm15[12],ymm10[13,14],ymm15[15]
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm10, %ymm2
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm12, %ymm24, %ymm10
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm10[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1],ymm10[2,3],ymm1[4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9],ymm10[10,11],ymm1[12],ymm10[13,14],ymm1[15]
+; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm7
; AVX512-FCP-NEXT: vmovdqa 80(%rdi), %xmm10
-; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %xmm15
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm15[0,1],xmm10[2],xmm15[3,4],xmm10[5],xmm15[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm18
-; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm23, %ymm22, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
-; AVX512-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqa64 %ymm2, %ymm28
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm6, %xmm25
+; AVX512-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0,1],xmm10[2],xmm1[3,4],xmm10[5],xmm1[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm13, %xmm2, %xmm2
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3,4,5,6,7],ymm7[8,9,10],ymm2[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,4,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm17
+; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm2
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7,8,9],ymm6[10],ymm2[11,12],ymm6[13],ymm2[14,15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0,1],xmm8[2],xmm4[3,4],xmm8[5],xmm4[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm8, %xmm25
; AVX512-FCP-NEXT: vmovdqa64 %xmm4, %xmm26
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm20, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm2[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512-FCP-NEXT: vmovdqa %ymm13, %ymm2
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm2
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vmovdqa %xmm14, %xmm7
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm14[2],xmm8[3,4],xmm14[5],xmm8[6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm8, %xmm27
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm14, %xmm14
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm1[0,1,2],ymm14[3,4,5,6,7],ymm1[8,9,10],ymm14[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm19
-; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm13, %ymm24, %ymm1
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512-FCP-NEXT: vmovdqa64 %ymm28, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512-FCP-NEXT: vmovdqa %ymm9, %ymm3
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm17, %ymm5, %ymm3
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm10[0,1],xmm15[2],xmm10[3,4],xmm15[5],xmm10[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm4, %xmm2
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,7,4]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm24, %ymm9, %ymm13
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm13[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7],ymm2[8],ymm13[9,10],ymm2[11],ymm13[12,13],ymm2[14],ymm13[15]
-; AVX512-FCP-NEXT: vpternlogq $226, %ymm17, %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm5[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1,2],ymm3[3],ymm5[4,5],ymm3[6],ymm5[7],ymm3[8],ymm5[9,10],ymm3[11],ymm5[12,13],ymm3[14],ymm5[15]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm15[0],xmm10[1],xmm15[2,3],xmm10[4],xmm15[5,6],xmm10[7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm5
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm11[0,1],xmm12[2],xmm11[3,4],xmm12[5],xmm11[6,7]
-; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm5
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm5
-; AVX512-FCP-NEXT: vextracti32x4 $2, %zmm5, %xmm5
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm2[5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm19, %xmm8
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm3[2],xmm8[3,4],xmm3[5],xmm8[6,7]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm3, %xmm27
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm14, %xmm14
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm2[0,1,2],ymm14[3,4,5,6,7],ymm2[8,9,10],ymm14[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,6,7,4]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm19
+; AVX512-FCP-NEXT: vmovdqa %ymm0, %ymm2
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm11, %ymm23, %ymm2
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7,8,9],ymm7[10],ymm2[11,12],ymm7[13],ymm2[14,15]
+; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0,1],xmm15[2],xmm5[3,4],xmm15[5],xmm5[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm2[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX512-FCP-NEXT: vmovdqa %ymm13, %ymm6
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm24, %ymm12, %ymm6
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
+; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm4
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0,1],xmm1[2],xmm10[3,4],xmm1[5],xmm10[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm22, %ymm23, %ymm9
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm9[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm9[1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7],ymm3[8],ymm9[9,10],ymm3[11],ymm9[12,13],ymm3[14],ymm9[15]
-; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm20, %ymm0
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7],ymm5[8],ymm0[9,10],ymm5[11],ymm0[12,13],ymm5[14],ymm0[15]
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm0
+; AVX512-FCP-NEXT: vpternlogq $226, %ymm23, %ymm13, %ymm11
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm11[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm11[1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7],ymm3[8],ymm11[9,10],ymm3[11],ymm11[12,13],ymm3[14],ymm11[15]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm15[0,1],xmm5[2],xmm15[3,4],xmm5[5],xmm15[6,7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512-FCP-NEXT: vpternlogq $226, %ymm24, %ymm0, %ymm12
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm12[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7],ymm4[8],ymm12[9,10],ymm4[11],ymm12[12,13],ymm4[14],ymm12[15]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm10[1],xmm1[2,3],xmm10[4],xmm1[5,6],xmm10[7]
+; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm13
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm13[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm13[1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7],ymm3[8],ymm13[9,10],ymm3[11],ymm13[12,13],ymm3[14],ymm13[15]
+; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm4
+; AVX512-FCP-NEXT: vmovdqa64 %xmm26, %xmm7
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3,4],xmm7[5],xmm4[6,7]
+; AVX512-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512-FCP-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm0
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7],ymm4[8],ymm0[9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
+; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
; AVX512-FCP-NEXT: vmovdqa64 %xmm27, %xmm4
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
+; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2,3],xmm8[4],xmm4[5,6],xmm8[7]
; AVX512-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
-; AVX512-FCP-NEXT: vmovdqa64 %xmm25, %xmm4
-; AVX512-FCP-NEXT: vmovdqa64 %xmm26, %xmm5
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512-FCP-NEXT: vextracti32x4 $2, %zmm4, %xmm4
-; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
-; AVX512-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512-FCP-NEXT: vmovdqa64 %zmm18, (%rsi)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm17, (%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm16, 64(%rsi)
; AVX512-FCP-NEXT: vmovdqa64 %zmm19, 64(%rdx)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm1, (%rdx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%rdx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 64(%rcx)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%rcx)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: load_i16_stride3_vf64:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512DQ-NEXT: vmovdqa64 224(%rdi), %ymm20
-; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %ymm21
+; AVX512DQ-NEXT: vmovdqa64 224(%rdi), %ymm18
+; AVX512DQ-NEXT: vmovdqa64 192(%rdi), %ymm20
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm20, %ymm21, %ymm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14],ymm3[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm5
-; AVX512DQ-NEXT: vmovdqa 272(%rdi), %xmm8
+; AVX512DQ-NEXT: vpternlogq $202, %ymm18, %ymm20, %ymm1
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
+; AVX512DQ-NEXT: vpshufb %ymm7, %ymm2, %ymm5
+; AVX512DQ-NEXT: vmovdqa 272(%rdi), %xmm1
; AVX512DQ-NEXT: vmovdqa 256(%rdi), %xmm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm8[2],xmm2[3,4],xmm8[5],xmm2[6,7]
-; AVX512DQ-NEXT: vmovdqa %xmm2, %xmm14
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm9 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
-; AVX512DQ-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512DQ-NEXT: vmovdqa %xmm2, %xmm3
+; AVX512DQ-NEXT: vmovdqa64 %xmm1, %xmm19
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm13 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
+; AVX512DQ-NEXT: vpshufb %xmm13, %xmm6, %xmm6
; AVX512DQ-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm6 = ymm5[0,1,2],ymm6[3,4,5,6,7],ymm5[8,9,10],ymm6[11,12,13,14,15]
; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %ymm22
-; AVX512DQ-NEXT: vmovdqa64 352(%rdi), %ymm23
-; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512DQ-NEXT: vpternlogq $202, %ymm22, %ymm23, %ymm6
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7],ymm6[8],ymm7[9],ymm6[10,11],ymm7[12],ymm6[13,14],ymm7[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm6, %ymm12
+; AVX512DQ-NEXT: vmovdqa64 320(%rdi), %ymm21
+; AVX512DQ-NEXT: vmovdqa64 352(%rdi), %ymm22
+; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm8
+; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm8
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13,14],ymm9[15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm8, %ymm11
; AVX512DQ-NEXT: vmovdqa 304(%rdi), %xmm1
; AVX512DQ-NEXT: vmovdqa 288(%rdi), %xmm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm13 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX512DQ-NEXT: vmovdqa %xmm2, %xmm4
-; AVX512DQ-NEXT: vmovdqa %xmm1, %xmm6
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm13, %xmm13
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm5, %zmm16
-; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %ymm24
-; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm13
+; AVX512DQ-NEXT: vmovdqa %xmm1, %xmm8
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
+; AVX512DQ-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm16
+; AVX512DQ-NEXT: vmovdqa64 128(%rdi), %ymm23
+; AVX512DQ-NEXT: vmovdqa 160(%rdi), %ymm11
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm5
-; AVX512DQ-NEXT: vpternlogq $202, %ymm24, %ymm13, %ymm5
+; AVX512DQ-NEXT: vpternlogq $202, %ymm23, %ymm11, %ymm5
; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
-; AVX512DQ-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512DQ-NEXT: vmovdqa 112(%rdi), %xmm11
-; AVX512DQ-NEXT: vmovdqa 96(%rdi), %xmm12
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6],xmm11[7]
-; AVX512DQ-NEXT: vpshufb %xmm15, %xmm10, %xmm10
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm10[0,1,2],ymm5[3,4,5,6,7]
-; AVX512DQ-NEXT: vmovdqa64 (%rdi), %ymm17
-; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-NEXT: vpshufb %ymm10, %ymm5, %ymm10
+; AVX512DQ-NEXT: vmovdqa 112(%rdi), %xmm15
+; AVX512DQ-NEXT: vmovdqa 96(%rdi), %xmm5
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm12 = xmm5[0],xmm15[1],xmm5[2,3],xmm15[4],xmm5[5,6],xmm15[7]
+; AVX512DQ-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0,1,2],ymm10[3,4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa64 (%rdi), %ymm24
+; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm12
; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512DQ-NEXT: vpternlogq $202, %ymm5, %ymm17, %ymm10
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm15 = ymm10[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0],ymm15[1],ymm10[2,3],ymm15[4],ymm10[5,6],ymm15[7],ymm10[8],ymm15[9],ymm10[10,11],ymm15[12],ymm10[13,14],ymm15[15]
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm10, %ymm2
+; AVX512DQ-NEXT: vpternlogq $202, %ymm12, %ymm24, %ymm10
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm10[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1],ymm10[2,3],ymm1[4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9],ymm10[10,11],ymm1[12],ymm10[13,14],ymm1[15]
+; AVX512DQ-NEXT: vpshufb %ymm7, %ymm1, %ymm7
; AVX512DQ-NEXT: vmovdqa 80(%rdi), %xmm10
-; AVX512DQ-NEXT: vmovdqa 64(%rdi), %xmm15
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm15[0,1],xmm10[2],xmm15[3,4],xmm10[5],xmm15[6,7]
-; AVX512DQ-NEXT: vpshufb %xmm9, %xmm3, %xmm3
-; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm18
-; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm23, %ymm22, %ymm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
-; AVX512DQ-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-NEXT: vmovdqa64 %ymm2, %ymm28
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
-; AVX512DQ-NEXT: vmovdqa64 %xmm6, %xmm25
+; AVX512DQ-NEXT: vmovdqa 64(%rdi), %xmm1
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0,1],xmm10[2],xmm1[3,4],xmm10[5],xmm1[6,7]
+; AVX512DQ-NEXT: vpshufb %xmm13, %xmm2, %xmm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3,4,5,6,7],ymm7[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,4,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm17
+; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7,8,9],ymm6[10],ymm2[11,12],ymm6[13],ymm2[14,15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0,1],xmm8[2],xmm4[3,4],xmm8[5],xmm4[6,7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm8, %xmm25
; AVX512DQ-NEXT: vmovdqa64 %xmm4, %xmm26
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm20, %ymm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm2[5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm2
+; AVX512DQ-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX512DQ-NEXT: vmovdqa %xmm14, %xmm7
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm14[2],xmm8[3,4],xmm14[5],xmm8[6,7]
-; AVX512DQ-NEXT: vmovdqa64 %xmm8, %xmm27
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm14, %xmm14
-; AVX512DQ-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm14 = ymm1[0,1,2],ymm14[3,4,5,6,7],ymm1[8,9,10],ymm14[11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm19
-; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-NEXT: vpternlogq $202, %ymm13, %ymm24, %ymm1
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512DQ-NEXT: vmovdqa64 %ymm28, %ymm3
-; AVX512DQ-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-NEXT: vmovdqa %ymm9, %ymm3
-; AVX512DQ-NEXT: vpternlogq $202, %ymm17, %ymm5, %ymm3
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm10[0,1],xmm15[2],xmm10[3,4],xmm15[5],xmm10[6,7]
-; AVX512DQ-NEXT: vpshufb %xmm2, %xmm4, %xmm2
-; AVX512DQ-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,7,4]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-NEXT: vpternlogq $226, %ymm24, %ymm9, %ymm13
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm13[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7],ymm2[8],ymm13[9,10],ymm2[11],ymm13[12,13],ymm2[14],ymm13[15]
-; AVX512DQ-NEXT: vpternlogq $226, %ymm17, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm5[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1,2],ymm3[3],ymm5[4,5],ymm3[6],ymm5[7],ymm3[8],ymm5[9,10],ymm3[11],ymm5[12,13],ymm3[14],ymm5[15]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm15[0],xmm10[1],xmm15[2,3],xmm10[4],xmm15[5,6],xmm10[7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512DQ-NEXT: vpshufb %xmm6, %xmm5, %xmm5
-; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
; AVX512DQ-NEXT: vpshufb %ymm4, %ymm2, %ymm2
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm11[0,1],xmm12[2],xmm11[3,4],xmm12[5],xmm11[6,7]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512DQ-NEXT: vpshufb %xmm8, %xmm5, %xmm5
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm5
-; AVX512DQ-NEXT: vextracti32x4 $2, %zmm5, %xmm5
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm2[5,6,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm19, %xmm8
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm3[2],xmm8[3,4],xmm3[5],xmm8[6,7]
+; AVX512DQ-NEXT: vmovdqa64 %xmm3, %xmm27
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm14, %xmm14
+; AVX512DQ-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm14 = ymm2[0,1,2],ymm14[3,4,5,6,7],ymm2[8,9,10],ymm14[11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,6,7,4]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm19
+; AVX512DQ-NEXT: vmovdqa %ymm0, %ymm2
+; AVX512DQ-NEXT: vpternlogq $202, %ymm11, %ymm23, %ymm2
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7,8,9],ymm7[10],ymm2[11,12],ymm7[13],ymm2[14,15]
+; AVX512DQ-NEXT: vpshufb %ymm9, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0,1],xmm15[2],xmm5[3,4],xmm15[5],xmm5[6,7]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm7, %xmm6
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm2[5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-NEXT: vmovdqa %ymm13, %ymm6
+; AVX512DQ-NEXT: vpternlogq $202, %ymm24, %ymm12, %ymm6
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
+; AVX512DQ-NEXT: vpshufb %ymm4, %ymm6, %ymm4
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0,1],xmm1[2],xmm10[3,4],xmm1[5],xmm10[6,7]
+; AVX512DQ-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512DQ-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512DQ-NEXT: vpternlogq $202, %ymm22, %ymm23, %ymm9
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm9[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm9[1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7],ymm3[8],ymm9[9,10],ymm3[11],ymm9[12,13],ymm3[14],ymm9[15]
-; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm20, %ymm0
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7],ymm5[8],ymm0[9,10],ymm5[11],ymm0[12,13],ymm5[14],ymm0[15]
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpshufb %ymm4, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpternlogq $226, %ymm23, %ymm13, %ymm11
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm11[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm11[1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7],ymm3[8],ymm11[9,10],ymm3[11],ymm11[12,13],ymm3[14],ymm11[15]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm15[0,1],xmm5[2],xmm15[3,4],xmm5[5],xmm15[6,7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-NEXT: vpternlogq $226, %ymm24, %ymm0, %ymm12
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm12[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7],ymm4[8],ymm12[9,10],ymm4[11],ymm12[12,13],ymm4[14],ymm12[15]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm10[1],xmm1[2,3],xmm10[4],xmm1[5,6],xmm10[7]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX512DQ-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512DQ-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm13
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm13[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm13[1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7],ymm3[8],ymm13[9,10],ymm3[11],ymm13[12,13],ymm3[14],ymm13[15]
+; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm4
+; AVX512DQ-NEXT: vmovdqa64 %xmm26, %xmm7
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3,4],xmm7[5],xmm4[6,7]
+; AVX512DQ-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm0
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7],ymm4[8],ymm0[9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
+; AVX512DQ-NEXT: vpshufb %ymm11, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa64 %xmm27, %xmm4
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
+; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2,3],xmm8[4],xmm4[5,6],xmm8[7]
; AVX512DQ-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-NEXT: vmovdqa64 %xmm25, %xmm4
-; AVX512DQ-NEXT: vmovdqa64 %xmm26, %xmm5
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX512DQ-NEXT: vpshufb %xmm8, %xmm4, %xmm4
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512DQ-NEXT: vextracti32x4 $2, %zmm4, %xmm4
-; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
-; AVX512DQ-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512DQ-NEXT: vmovdqa64 %zmm18, (%rsi)
+; AVX512DQ-NEXT: vmovdqa64 %zmm17, (%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm16, 64(%rsi)
; AVX512DQ-NEXT: vmovdqa64 %zmm19, 64(%rdx)
-; AVX512DQ-NEXT: vmovdqa64 %zmm1, (%rdx)
+; AVX512DQ-NEXT: vmovdqa64 %zmm2, (%rdx)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, 64(%rcx)
-; AVX512DQ-NEXT: vmovdqa64 %zmm2, (%rcx)
+; AVX512DQ-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: load_i16_stride3_vf64:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm0 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535]
-; AVX512DQ-FCP-NEXT: vmovdqa64 224(%rdi), %ymm20
-; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm21
+; AVX512DQ-FCP-NEXT: vmovdqa64 224(%rdi), %ymm18
+; AVX512DQ-FCP-NEXT: vmovdqa64 192(%rdi), %ymm20
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm20, %ymm21, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2,3],ymm3[4],ymm1[5,6],ymm3[7],ymm1[8],ymm3[9],ymm1[10,11],ymm3[12],ymm1[13,14],ymm3[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa 272(%rdi), %xmm8
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm18, %ymm20, %ymm1
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3],ymm2[4],ymm1[5,6],ymm2[7],ymm1[8],ymm2[9],ymm1[10,11],ymm2[12],ymm1[13,14],ymm2[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [0,1,6,7,12,13,2,3,4,5,14,15,8,9,10,11,16,17,22,23,28,29,18,19,20,21,30,31,24,25,26,27]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm2, %ymm5
+; AVX512DQ-FCP-NEXT: vmovdqa 272(%rdi), %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa 256(%rdi), %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm8[2],xmm2[3,4],xmm8[5],xmm2[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, %xmm14
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm9 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm6, %xmm6
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm1, %xmm19
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm13 = [4,5,14,15,0,1,2,3,8,9,14,15,4,5,10,11]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm6, %xmm6
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm5[0,1,2],ymm6[3,4,5,6,7],ymm5[8,9,10],ymm6[11,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,6,5,4,7]
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %ymm22
-; AVX512DQ-FCP-NEXT: vmovdqa64 352(%rdi), %ymm23
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm6
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm22, %ymm23, %ymm6
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2,3],ymm7[4],ymm6[5,6],ymm7[7],ymm6[8],ymm7[9],ymm6[10,11],ymm7[12],ymm6[13,14],ymm7[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm6, %ymm12
+; AVX512DQ-FCP-NEXT: vmovdqa64 320(%rdi), %ymm21
+; AVX512DQ-FCP-NEXT: vmovdqa64 352(%rdi), %ymm22
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm8
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm8
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm8[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm8 = ymm8[0],ymm9[1],ymm8[2,3],ymm9[4],ymm8[5,6],ymm9[7],ymm8[8],ymm9[9],ymm8[10,11],ymm9[12],ymm8[13,14],ymm9[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11,16,17,22,23,28,29,18,19,24,25,30,31,20,21,26,27]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm11
; AVX512DQ-FCP-NEXT: vmovdqa 304(%rdi), %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa 288(%rdi), %xmm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm13 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm2[0],xmm1[1],xmm2[2,3],xmm1[4],xmm2[5,6],xmm1[7]
; AVX512DQ-FCP-NEXT: vmovdqa %xmm2, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, %xmm6
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm13, %xmm13
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm12 = ymm13[0,1,2],ymm12[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm5, %zmm16
-; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %ymm24
-; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm13
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, %xmm8
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,6,7,12,13,2,3,8,9,14,15,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2],ymm11[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm16
+; AVX512DQ-FCP-NEXT: vmovdqa64 128(%rdi), %ymm23
+; AVX512DQ-FCP-NEXT: vmovdqa 160(%rdi), %ymm11
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm13, %ymm5
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm23, %ymm11, %ymm5
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm5[2,3,0,1]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2,3],ymm12[4],ymm5[5,6],ymm12[7],ymm5[8],ymm12[9],ymm5[10,11],ymm12[12],ymm5[13,14],ymm12[15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm5, %ymm5
-; AVX512DQ-FCP-NEXT: vmovdqa 112(%rdi), %xmm11
-; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %xmm12
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm12[0],xmm11[1],xmm12[2,3],xmm11[4],xmm12[5,6],xmm11[7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm15, %xmm10, %xmm10
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm10[0,1,2],ymm5[3,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %ymm17
-; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
+; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm5, %ymm10
+; AVX512DQ-FCP-NEXT: vmovdqa 112(%rdi), %xmm15
+; AVX512DQ-FCP-NEXT: vmovdqa 96(%rdi), %xmm5
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm12 = xmm5[0],xmm15[1],xmm5[2,3],xmm15[4],xmm5[5,6],xmm15[7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm14, %xmm12, %xmm12
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm6 = ymm12[0,1,2],ymm10[3,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdi), %ymm24
+; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm12
; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm10
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm5, %ymm17, %ymm10
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm15 = ymm10[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm10 = ymm10[0],ymm15[1],ymm10[2,3],ymm15[4],ymm10[5,6],ymm15[7],ymm10[8],ymm15[9],ymm10[10,11],ymm15[12],ymm10[13,14],ymm15[15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm10, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm12, %ymm24, %ymm10
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm1 = ymm10[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm10[0],ymm1[1],ymm10[2,3],ymm1[4],ymm10[5,6],ymm1[7],ymm10[8],ymm1[9],ymm10[10,11],ymm1[12],ymm10[13,14],ymm1[15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm1, %ymm7
; AVX512DQ-FCP-NEXT: vmovdqa 80(%rdi), %xmm10
-; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %xmm15
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm15[0,1],xmm10[2],xmm15[3,4],xmm10[5],xmm15[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm9, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm2[0,1,2],ymm3[3,4,5,6,7],ymm2[8,9,10],ymm3[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,4,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm18
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm23, %ymm22, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm1[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7,8,9],ymm2[10],ymm1[11,12],ymm2[13],ymm1[14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm2 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm2, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm2, %ymm28
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1],xmm6[2],xmm4[3,4],xmm6[5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm6, %xmm25
+; AVX512DQ-FCP-NEXT: vmovdqa 64(%rdi), %xmm1
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm2 = xmm1[0,1],xmm10[2],xmm1[3,4],xmm10[5],xmm1[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm13, %xmm2, %xmm2
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm7[0,1,2],ymm2[3,4,5,6,7],ymm7[8,9,10],ymm2[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,6,5,4,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm2, %zmm17
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm22, %ymm21, %ymm2
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm6[2],ymm2[3,4],ymm6[5],ymm2[6,7,8,9],ymm6[10],ymm2[11,12],ymm6[13],ymm2[14,15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13,18,19,24,25,30,31,20,21,26,27,16,17,22,23,28,29]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm4[0,1],xmm8[2],xmm4[3,4],xmm8[5],xmm4[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm8, %xmm25
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm4, %xmm26
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [2,3,8,9,14,15,4,5,10,11,10,11,10,11,10,11]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm20, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm1[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm4[2],ymm1[3,4],ymm4[5],ymm1[6,7,8,9],ymm4[10],ymm1[11,12],ymm4[13],ymm1[14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm7
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm7[0,1,2,3,4],xmm2[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm13 = [65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm13, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm2
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7,8,9],ymm4[10],ymm2[11,12],ymm4[13],ymm2[14,15]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [2,3,8,9,14,15,4,5,12,13,10,11,0,1,6,7,18,19,24,25,30,31,20,21,28,29,26,27,16,17,22,23]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm14, %xmm7
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm14[2],xmm8[3,4],xmm14[5],xmm8[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm8, %xmm27
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm14, %xmm14
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm1[0,1,2],ymm14[3,4,5,6,7],ymm1[8,9,10],ymm14[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,7,4]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm14[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm19
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm1
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm13, %ymm24, %ymm1
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm1[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7,8,9],ymm3[10],ymm1[11,12],ymm3[13],ymm1[14,15]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %ymm28, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm3, %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm12[0,1],xmm11[2],xmm12[3,4],xmm11[5],xmm12[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm1[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm9, %ymm3
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm17, %ymm5, %ymm3
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm3[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0,1],ymm6[2],ymm3[3,4],ymm6[5],ymm3[6,7,8,9],ymm6[10],ymm3[11,12],ymm6[13],ymm3[14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm10[0,1],xmm15[2],xmm10[3,4],xmm15[5],xmm10[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm4, %xmm2
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm3[0,1,2],ymm2[3,4,5,6,7],ymm3[8,9,10],ymm2[11,12,13,14,15]
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,7,4]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm24, %ymm9, %ymm13
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm13[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm13[1,2],ymm2[3],ymm13[4,5],ymm2[6],ymm13[7],ymm2[8],ymm13[9,10],ymm2[11],ymm13[12,13],ymm2[14],ymm13[15]
-; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm17, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm5[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm5[1,2],ymm3[3],ymm5[4,5],ymm3[6],ymm5[7],ymm3[8],ymm5[9,10],ymm3[11],ymm5[12,13],ymm3[14],ymm5[15]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm4 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm15[0],xmm10[1],xmm15[2,3],xmm10[4],xmm15[5,6],xmm10[7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm5, %xmm5
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm0, %ymm5
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1,2,3,4],ymm5[5,6,7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm2, %ymm2
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm11[0,1],xmm12[2],xmm11[3,4],xmm12[5],xmm11[6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm5, %xmm5
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm3, %zmm5
-; AVX512DQ-FCP-NEXT: vextracti32x4 $2, %zmm5, %xmm5
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3,4],xmm2[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm19, %xmm8
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm14 = xmm8[0,1],xmm3[2],xmm8[3,4],xmm3[5],xmm8[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm3, %xmm27
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [4,5,4,5,4,5,4,5,10,11,0,1,6,7,12,13]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm14, %xmm14
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm14, %ymm0, %ymm14
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm14 = ymm2[0,1,2],ymm14[3,4,5,6,7],ymm2[8,9,10],ymm14[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,6,7,4]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm14[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm19
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, %ymm2
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm11, %ymm23, %ymm2
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0,1],ymm7[2],ymm2[3,4],ymm7[5],ymm2[6,7,8,9],ymm7[10],ymm2[11,12],ymm7[13],ymm2[14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm2, %ymm2
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm5[0,1],xmm15[2],xmm5[3,4],xmm15[5],xmm5[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm7, %xmm6
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3,4],xmm2[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm13, %ymm6
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm24, %ymm12, %ymm6
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm6[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3,4],ymm7[5],ymm6[6,7,8,9],ymm7[10],ymm6[11,12],ymm7[13],ymm6[14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm6, %ymm4
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm6 = xmm10[0,1],xmm1[2],xmm10[3,4],xmm1[5],xmm10[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm6, %xmm3
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm4[0,1,2],ymm3[3,4,5,6,7],ymm4[8,9,10],ymm3[11,12,13,14,15]
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,7,4]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm22, %ymm23, %ymm9
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm9[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm9[1,2],ymm3[3],ymm9[4,5],ymm3[6],ymm9[7],ymm3[8],ymm9[9,10],ymm3[11],ymm9[12,13],ymm3[14],ymm9[15]
-; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm20, %ymm0
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm0[2,3,0,1]
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm5[0],ymm0[1,2],ymm5[3],ymm0[4,5],ymm5[6],ymm0[7],ymm5[8],ymm0[9,10],ymm5[11],ymm0[12,13],ymm5[14],ymm0[15]
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm3, %ymm3
-; AVX512DQ-FCP-NEXT: vpshufb %ymm4, %ymm0, %ymm0
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm23, %ymm13, %ymm11
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm11[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm11[1,2],ymm3[3],ymm11[4,5],ymm3[6],ymm11[7],ymm3[8],ymm11[9,10],ymm3[11],ymm11[12,13],ymm3[14],ymm11[15]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15,20,21,26,27,16,17,22,23,28,29,18,19,24,25,30,31]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm15[0,1],xmm5[2],xmm15[3,4],xmm5[5],xmm15[6,7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm5 = [4,5,10,11,0,1,6,7,12,13,14,15,0,1,2,3]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $226, %ymm24, %ymm0, %ymm12
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm12[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm4 = ymm4[0],ymm12[1,2],ymm4[3],ymm12[4,5],ymm4[6],ymm12[7],ymm4[8],ymm12[9,10],ymm4[11],ymm12[12,13],ymm4[14],ymm12[15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm4, %ymm4
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm10[1],xmm1[2,3],xmm10[4],xmm1[5,6],xmm10[7]
+; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,2,3,0,1,6,7,12,13,2,3,8,9,14,15]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm1 = ymm4[0,1,2,3,4],ymm1[5,6,7]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm21, %ymm22, %ymm13
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm13[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm13[1,2],ymm3[3],ymm13[4,5],ymm3[6],ymm13[7],ymm3[8],ymm13[9,10],ymm3[11],ymm13[12,13],ymm3[14],ymm13[15]
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm7
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm7[2],xmm4[3,4],xmm7[5],xmm4[6,7]
+; AVX512DQ-FCP-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm3, %ymm3
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
+; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpternlogq $202, %ymm20, %ymm18, %ymm0
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm0[2,3,0,1]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} ymm0 = ymm4[0],ymm0[1,2],ymm4[3],ymm0[4,5],ymm4[6],ymm0[7],ymm4[8],ymm0[9,10],ymm4[11],ymm0[12,13],ymm4[14],ymm0[15]
+; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm27, %xmm4
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0],xmm4[1],xmm7[2,3],xmm4[4],xmm7[5,6],xmm4[7]
+; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0],xmm8[1],xmm4[2,3],xmm8[4],xmm4[5,6],xmm8[7]
; AVX512DQ-FCP-NEXT: vpshufb %xmm6, %xmm4, %xmm4
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4],ymm4[5,6,7]
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm25, %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa64 %xmm26, %xmm5
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
-; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm4, %zmm0, %zmm4
-; AVX512DQ-FCP-NEXT: vextracti32x4 $2, %zmm4, %xmm4
-; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3,4],xmm3[5,6,7]
-; AVX512DQ-FCP-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm18, (%rsi)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm17, (%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm16, 64(%rsi)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm19, 64(%rdx)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, (%rdx)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%rdx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 64(%rcx)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%rcx)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
index 8b6ba51..8091afb 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-7.ll
@@ -1246,29 +1246,28 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,2,4,0]
; AVX512BW-FCP-NEXT: vpermi2q %ymm3, %ymm0, %ymm1
-; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [1,3,5,7,1,3,5,7]
-; AVX512BW-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm0
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm0[1,5,9,13],zero,zero,zero,ymm0[2,6,10,14],zero,zero,zero,ymm0[19,23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,3,5,0,5,1,3,0]
-; AVX512BW-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm3
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[0,4,8],zero,zero,zero,zero,ymm3[1,5,9],zero,zero,zero,zero,ymm3[2,6,18],zero,zero,zero,zero,ymm3[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,ymm2[2,10,18,26],zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28]
+; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8],zero,zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[19,27],zero,zero,zero,zero,zero,ymm3[20,28],zero,zero
; AVX512BW-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,ymm2[2,10,18,26],zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28]
-; AVX512BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28],zero,zero
-; AVX512BW-FCP-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,2,4,6,0,2,4,6]
; AVX512BW-FCP-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512BW-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm1
+; AVX512BW-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm3
; AVX512BW-FCP-NEXT: movl $236730480, %ecx # imm = 0xE1C3870
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 {%k1} = ymm1[u,u,u,u,0,4,8,u,u,u,u,1,5,9,u,u,u,u,18,22,26,u,u,u,u,19,23,27,u,u,u,u]
-; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512BW-FCP-NEXT: vmovdqa %ymm2, (%rax)
-; AVX512BW-FCP-NEXT: vextracti32x4 $2, %zmm0, 32(%rax)
-; AVX512BW-FCP-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512BW-FCP-NEXT: vmovq %xmm0, 48(%rax)
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 {%k1} = ymm3[u,u,u,u,0,4,8,u,u,u,u,1,5,9,u,u,u,u,18,22,26,u,u,u,u,19,23,27,u,u,u,u]
+; AVX512BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,3,5,7,1,3,5,7]
+; AVX512BW-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512BW-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,ymm2[1,5,9,13],zero,zero,zero,ymm2[2,6,10,14],zero,zero,zero,ymm2[19,23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,3,5,0,5,1,3,0]
+; AVX512BW-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm1
+; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,4,8],zero,zero,zero,zero,ymm1[1,5,9],zero,zero,zero,zero,ymm1[2,6,18],zero,zero,zero,zero,ymm1[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512BW-FCP-NEXT: vpor %ymm2, %ymm1, %ymm1
+; AVX512BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-FCP-NEXT: vmovq %xmm2, 48(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa %xmm1, 32(%rax)
+; AVX512BW-FCP-NEXT: vmovdqa %ymm0, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
; AVX512BW-FCP-NEXT: retq
;
@@ -1326,29 +1325,28 @@ define void @store_i8_stride7_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecp
; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512DQ-BW-FCP-NEXT: vpmovsxbq {{.*#+}} ymm1 = [0,2,4,0]
; AVX512DQ-BW-FCP-NEXT: vpermi2q %ymm3, %ymm0, %ymm1
-; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [1,3,5,7,1,3,5,7]
-; AVX512DQ-BW-FCP-NEXT: # ymm0 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm2, %ymm0, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,ymm0[1,5,9,13],zero,zero,zero,ymm0[2,6,10,14],zero,zero,zero,ymm0[19,23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,3,5,0,5,1,3,0]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm3
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[0,4,8],zero,zero,zero,zero,ymm3[1,5,9],zero,zero,zero,zero,ymm3[2,6,18],zero,zero,zero,zero,ymm3[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 = ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,ymm2[2,10,18,26],zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28]
+; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm3 = ymm2[2,3,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8],zero,zero,zero,zero,zero,ymm3[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm3[19,27],zero,zero,zero,zero,zero,ymm3[20,28],zero,zero
; AVX512DQ-BW-FCP-NEXT: vpor %ymm0, %ymm3, %ymm0
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm3 = ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,ymm2[2,10,18,26],zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28]
-; AVX512DQ-BW-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,zero,zero,ymm2[1,9],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm2[19,27],zero,zero,zero,zero,zero,ymm2[20,28],zero,zero
-; AVX512DQ-BW-FCP-NEXT: vpor %ymm3, %ymm2, %ymm2
; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [0,2,4,6,0,2,4,6]
; AVX512DQ-BW-FCP-NEXT: # ymm3 = mem[0,1,0,1]
-; AVX512DQ-BW-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm3
; AVX512DQ-BW-FCP-NEXT: movl $236730480, %ecx # imm = 0xE1C3870
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
-; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 {%k1} = ymm1[u,u,u,u,0,4,8,u,u,u,u,1,5,9,u,u,u,u,18,22,26,u,u,u,u,19,23,27,u,u,u,u]
-; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0
-; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm2, (%rax)
-; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $2, %zmm0, 32(%rax)
-; AVX512DQ-BW-FCP-NEXT: vextracti32x4 $3, %zmm0, %xmm0
-; AVX512DQ-BW-FCP-NEXT: vmovq %xmm0, 48(%rax)
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm0 {%k1} = ymm3[u,u,u,u,0,4,8,u,u,u,u,1,5,9,u,u,u,u,18,22,26,u,u,u,u,19,23,27,u,u,u,u]
+; AVX512DQ-BW-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,3,5,7,1,3,5,7]
+; AVX512DQ-BW-FCP-NEXT: # ymm3 = mem[0,1,0,1]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm2, %ymm3, %ymm2
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,ymm2[1,5,9,13],zero,zero,zero,ymm2[2,6,10,14],zero,zero,zero,ymm2[19,23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [1,3,5,0,5,1,3,0]
+; AVX512DQ-BW-FCP-NEXT: vpermd %ymm1, %ymm3, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,4,8],zero,zero,zero,zero,ymm1[1,5,9],zero,zero,zero,zero,ymm1[2,6,18],zero,zero,zero,zero,ymm1[23,27,19],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512DQ-BW-FCP-NEXT: vpor %ymm2, %ymm1, %ymm1
+; AVX512DQ-BW-FCP-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512DQ-BW-FCP-NEXT: vmovq %xmm2, 48(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %xmm1, 32(%rax)
+; AVX512DQ-BW-FCP-NEXT: vmovdqa %ymm0, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper
; AVX512DQ-BW-FCP-NEXT: retq
%in.vec0 = load <8 x i8>, ptr %in.vecptr0, align 64
@@ -2053,77 +2051,76 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512: # %bb.0:
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512-NEXT: vmovdqa (%rdi), %xmm4
-; AVX512-NEXT: vmovdqa (%rsi), %xmm5
-; AVX512-NEXT: vmovdqa (%rdx), %xmm6
-; AVX512-NEXT: vmovdqa (%rcx), %xmm7
-; AVX512-NEXT: vmovdqa (%r8), %xmm0
-; AVX512-NEXT: vmovdqa (%r10), %xmm1
-; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm3
-; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm2
-; AVX512-NEXT: vinserti128 $1, (%r9), %ymm0, %ymm0
-; AVX512-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[12,13,u,u,u],zero,zero,xmm6[14,15,u,u,u]
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,12,13],zero,zero,xmm4[u,u,u,14,15],zero,zero,xmm4[u,u,u]
-; AVX512-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[10],zero,xmm5[u,u,u,u,13,12],zero,xmm5[u,u,u,u,15,14],zero
-; AVX512-NEXT: vpshufb {{.*#+}} xmm6 = zero,xmm1[13,u,u,u,u],zero,zero,xmm1[14,u,u,u,u],zero,zero,xmm1[15]
-; AVX512-NEXT: vpor %xmm6, %xmm5, %xmm5
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm5
-; AVX512-NEXT: vpermq {{.*#+}} ymm4 = ymm2[3,1,1,3]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[1],zero,zero,ymm4[u,u,u,10,2],zero,zero,ymm4[u,u,u,11,3],zero,zero,ymm4[u,u,u,20,28],zero,zero,ymm4[u,u,u,21,29],zero,zero,ymm4[u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm3[1,3,3,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = zero,ymm6[1,9,u,u,u],zero,zero,ymm6[2,10,u,u,u],zero,zero,ymm6[3,19,u,u,u],zero,zero,ymm6[28,20,u,u,u],zero,zero,ymm6[29,21,u]
-; AVX512-NEXT: vpor %ymm4, %ymm6, %ymm4
-; AVX512-NEXT: vpshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,5,5,6]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,2,3,3]
-; AVX512-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm0[1,3,1,3]
+; AVX512-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512-NEXT: vmovdqa (%rdx), %xmm5
+; AVX512-NEXT: vmovdqa (%rcx), %xmm6
+; AVX512-NEXT: vmovdqa (%r8), %xmm3
+; AVX512-NEXT: vmovdqa (%r9), %xmm4
+; AVX512-NEXT: vmovdqa (%r10), %xmm2
+; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm8
+; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm9
+; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm7
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = ymm9[u,u,u,u,u,5],zero,ymm9[u,u,u,u,u,6],zero,ymm9[u,u,u,u,u],zero,ymm9[23,u,u,u,u,u],zero,ymm9[24,u,u,u,u]
+; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm9[2,3,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u],zero,ymm11[5,u,u,u,u,u],zero,ymm11[6,u,u,u,u,u,23],zero,ymm11[u,u,u,u,u,24],zero,ymm11[u,u,u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
+; AVX512-NEXT: vpternlogq $50, %ymm10, %ymm12, %ymm11
+; AVX512-NEXT: vpermq {{.*#+}} ymm10 = ymm9[0,2,0,2]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,ymm10[0,8,u,u,u],zero,zero,ymm10[1,9,u,u,u],zero,zero,ymm10[18,26,u,u,u],zero,zero,ymm10[19,27,u,u,u],zero,zero,ymm10[20,28]
+; AVX512-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm8[u,u,u,5],zero,ymm8[u,u,u,u,u,6],zero,ymm8[u,u,u,u,u],zero,ymm8[23,u,u,u,u,u],zero,ymm8[24,u,u,u,u,u],zero
+; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm8[2,3,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u],zero,ymm13[5,u,u,u,u,u],zero,ymm13[6,u,u,u,u,u,23],zero,ymm13[u,u,u,u,u,24],zero,ymm13[u,u,u,u,u,25]
+; AVX512-NEXT: vpternlogq $200, %ymm11, %ymm12, %ymm13
+; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm8[0,2,0,2]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,8],zero,zero,ymm11[u,u,u,1,9],zero,zero,ymm11[u,u,u,2,10],zero,zero,ymm11[u,u,u,19,27],zero,zero,ymm11[u,u,u,20,28],zero,zero
+; AVX512-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm11
+; AVX512-NEXT: vporq %zmm10, %zmm11, %zmm10
+; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm7[4],zero,ymm7[u,u,u,u,u,5],zero,ymm7[u,u,u,u,u,6],zero,ymm7[u,u,u,u,u],zero,ymm7[23,u,u,u,u,u],zero,ymm7[24,u,u]
+; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm7[2,3,0,1]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm12 = zero,ymm12[4,u,u,u,u,u],zero,ymm12[5,u,u,u,u,u],zero,ymm12[6,u,u,u,u,u,23],zero,ymm12[u,u,u,u,u,24],zero,ymm12[u,u]
+; AVX512-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
+; AVX512-NEXT: vpternlogq $200, %ymm11, %ymm13, %ymm12
+; AVX512-NEXT: vpermq {{.*#+}} ymm11 = ymm7[0,2,0,2]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,0,8],zero,ymm11[u,u,u,u,1,9],zero,ymm11[u,u,u,u,18,26],zero,ymm11[u,u,u,u,19,27],zero,ymm11[u,u,u,u]
+; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
+; AVX512-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,0]
+; AVX512-NEXT: vpandn %ymm12, %ymm13, %ymm12
+; AVX512-NEXT: vpshuflw {{.*#+}} xmm13 = xmm2[1,1,0,0,4,5,6,7]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,0]
+; AVX512-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,1,0]
+; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
+; AVX512-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512-NEXT: vporq %zmm12, %zmm11, %zmm11
+; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512-NEXT: vpermq {{.*#+}} ymm8 = ymm8[3,1,1,3]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,ymm8[u,u,u,10,2],zero,zero,ymm8[u,u,u,11,3],zero,zero,ymm8[u,u,u,20,28],zero,zero,ymm8[u,u,u,21,29],zero,zero,ymm8[u]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,3,3,1]
+; AVX512-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm9[1,9,u,u,u],zero,zero,ymm9[2,10,u,u,u],zero,zero,ymm9[3,19,u,u,u],zero,zero,ymm9[28,20,u,u,u],zero,zero,ymm9[29,21,u]
+; AVX512-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512-NEXT: vpshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,4,5,5,6]
+; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,2,3,3]
+; AVX512-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm7[1,3,1,3]
; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
-; AVX512-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm7
-; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm7
-; AVX512-NEXT: vinserti32x4 $2, %xmm5, %zmm7, %zmm4
-; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm3[u,u,u,u,u,5],zero,ymm3[u,u,u,u,u,6],zero,ymm3[u,u,u,u,u],zero,ymm3[23,u,u,u,u,u],zero,ymm3[24,u,u,u,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm3[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u,u,u]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
-; AVX512-NEXT: vpternlogq $50, %ymm6, %ymm8, %ymm7
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,0,2]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8,u,u,u],zero,zero,ymm3[1,9,u,u,u],zero,zero,ymm3[18,26,u,u,u],zero,zero,ymm3[19,27,u,u,u],zero,zero,ymm3[20,28]
-; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm3, %zmm3
-; AVX512-NEXT: vpshufb {{.*#+}} ymm6 = ymm2[u,u,u,5],zero,ymm2[u,u,u,u,u,6],zero,ymm2[u,u,u,u,u],zero,ymm2[23,u,u,u,u,u],zero,ymm2[24,u,u,u,u,u],zero
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u,u,u,u,25]
-; AVX512-NEXT: vpternlogq $200, %ymm6, %ymm8, %ymm7
-; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,0,2]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,8],zero,zero,ymm2[u,u,u,1,9],zero,zero,ymm2[u,u,u,2,10],zero,zero,ymm2[u,u,u,19,27],zero,zero,ymm2[u,u,u,20,28],zero,zero
-; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm2
-; AVX512-NEXT: vporq %zmm3, %zmm2, %zmm2
-; AVX512-NEXT: vpshufb {{.*#+}} xmm3 = xmm1[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,0]
-; AVX512-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
-; AVX512-NEXT: vpandn %ymm3, %ymm6, %ymm3
-; AVX512-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,0,0,4,5,6,7]
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[4],zero,ymm0[u,u,u,u,u,5],zero,ymm0[u,u,u,u,u,6],zero,ymm0[u,u,u,u,u],zero,ymm0[23,u,u,u,u,u],zero,ymm0[24,u,u]
-; AVX512-NEXT: vpermq {{.*#+}} ymm7 = ymm0[2,3,0,1]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm7 = zero,ymm7[4,u,u,u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u]
-; AVX512-NEXT: vpternlogq $200, %ymm3, %ymm6, %ymm7
-; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,0,2]
-; AVX512-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,8],zero,ymm0[u,u,u,u,1,9],zero,ymm0[u,u,u,u,18,26],zero,ymm0[u,u,u,u,19,27],zero,ymm0[u,u,u,u]
-; AVX512-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm0
-; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512-NEXT: vmovdqa %xmm5, 96(%rax)
-; AVX512-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512-NEXT: vmovdqa %ymm4, 64(%rax)
+; AVX512-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm7
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm7
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[12,13,u,u,u],zero,zero,xmm5[14,15,u,u,u]
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,12,13],zero,zero,xmm0[u,u,u,14,15],zero,zero,xmm0[u,u,u]
+; AVX512-NEXT: vpor %xmm5, %xmm0, %xmm0
+; AVX512-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
+; AVX512-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[13,u,u,u,u],zero,zero,xmm2[14,u,u,u,u],zero,zero,xmm2[15]
+; AVX512-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX512-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-NEXT: vinserti32x4 $2, %xmm1, %zmm7, %zmm0
+; AVX512-NEXT: vmovdqa %xmm1, 96(%rax)
+; AVX512-NEXT: vmovdqa %ymm0, 64(%rax)
+; AVX512-NEXT: vmovdqa64 %zmm11, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -2131,70 +2128,69 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm2
-; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm3
-; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm4
-; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm5
-; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm1
-; AVX512-FCP-NEXT: vmovdqa (%r10), %xmm0
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm6
-; AVX512-FCP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm7
-; AVX512-FCP-NEXT: vinserti128 $1, (%r9), %ymm1, %ymm1
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm1
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[12,13,u,u,u],zero,zero,xmm4[14,15,u,u,u]
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,12,13],zero,zero,xmm2[u,u,u,14,15],zero,zero,xmm2[u,u,u]
-; AVX512-FCP-NEXT: vpor %xmm4, %xmm2, %xmm2
-; AVX512-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10],zero,xmm3[u,u,u,u,13,12],zero,xmm3[u,u,u,u,15,14],zero
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
-; AVX512-FCP-NEXT: vpor %xmm4, %xmm3, %xmm3
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm7[3,1,1,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[1],zero,zero,ymm2[u,u,u,10,2],zero,zero,ymm2[u,u,u,11,3],zero,zero,ymm2[u,u,u,20,28],zero,zero,ymm2[u,u,u,21,29],zero,zero,ymm2[u]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm6[1,3,3,1]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,ymm4[1,9,u,u,u],zero,zero,ymm4[2,10,u,u,u],zero,zero,ymm4[3,19,u,u,u],zero,zero,ymm4[28,20,u,u,u],zero,zero,ymm4[29,21,u]
-; AVX512-FCP-NEXT: vpor %ymm2, %ymm4, %ymm2
-; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [2,2,3,3,2,2,3,3]
-; AVX512-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermd %ymm4, %ymm5, %ymm4
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,1,3]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,1,9],zero,ymm5[u,u,u,u,2,10],zero,ymm5[u,u,u,u,19,27],zero,ymm5[u,u,u,u,20,28],zero,ymm5[u,u,u,u,21]
-; AVX512-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5
-; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
-; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm5, %zmm2
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm6[0,2,0,2]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[0,8,u,u,u],zero,zero,ymm4[1,9,u,u,u],zero,zero,ymm4[18,26,u,u,u],zero,zero,ymm4[19,27,u,u,u],zero,zero,ymm4[20,28]
-; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [1,5,2,6,1,5,2,6]
-; AVX512-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512-FCP-NEXT: vpermd %ymm6, %ymm5, %ymm6
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u],zero,zero,ymm6[1,5,u,u,u],zero,zero,ymm6[2,6,u,u,u],zero,zero,ymm6[19,23,u,u,u],zero,zero,ymm6[24,28,u,u,u],zero
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm4
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm7[0,2,0,2]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[0,8],zero,zero,ymm6[u,u,u,1,9],zero,zero,ymm6[u,u,u,2,10],zero,zero,ymm6[u,u,u,19,27],zero,zero,ymm6[u,u,u,20,28],zero,zero
-; AVX512-FCP-NEXT: vpermd %ymm7, %ymm5, %ymm7
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,5],zero,zero,ymm7[u,u,u,2,6],zero,zero,ymm7[u,u,u,19,23],zero,zero,ymm7[u,u,u,24,28],zero,zero,ymm7[u,u,u,25]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512-FCP-NEXT: vporq %zmm4, %zmm6, %zmm4
-; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm0[1,1,0,0,4,5,6,7]
-; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,0,1,0,0,0,0]
-; AVX512-FCP-NEXT: vpermd %ymm6, %ymm7, %ymm6
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm1[0,2,0,2]
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,0,8],zero,ymm6[u,u,u,u,1,9],zero,ymm6[u,u,u,u,18,26],zero,ymm6[u,u,u,u,19,27],zero,ymm6[u,u,u,u]
-; AVX512-FCP-NEXT: vpermd %ymm1, %ymm5, %ymm1
-; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,4],zero,ymm1[u,u,u,u,1,5],zero,ymm1[u,u,u,u,2,6],zero,ymm1[u,u,u,u,19,23],zero,ymm1[u,u,u,u,24,28],zero,ymm1[u]
-; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm6, %zmm1
-; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm1
-; AVX512-FCP-NEXT: vmovdqa %xmm3, 96(%rax)
-; AVX512-FCP-NEXT: vmovdqa64 %zmm1, (%rax)
-; AVX512-FCP-NEXT: vmovdqa %ymm2, 64(%rax)
+; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512-FCP-NEXT: vmovdqa (%rdx), %xmm5
+; AVX512-FCP-NEXT: vmovdqa (%rcx), %xmm6
+; AVX512-FCP-NEXT: vmovdqa (%r8), %xmm3
+; AVX512-FCP-NEXT: vmovdqa (%r9), %xmm4
+; AVX512-FCP-NEXT: vmovdqa (%r10), %xmm2
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm7
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm8
+; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm9
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm8[0,2,0,2]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,ymm10[0,8,u,u,u],zero,zero,ymm10[1,9,u,u,u],zero,zero,ymm10[18,26,u,u,u],zero,zero,ymm10[19,27,u,u,u],zero,zero,ymm10[20,28]
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [1,5,2,6,1,5,2,6]
+; AVX512-FCP-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpermd %ymm8, %ymm11, %ymm12
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[u,u,u],zero,zero,ymm12[1,5,u,u,u],zero,zero,ymm12[2,6,u,u,u],zero,zero,ymm12[19,23,u,u,u],zero,zero,ymm12[24,28,u,u,u],zero
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm7[0,2,0,2]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[0,8],zero,zero,ymm12[u,u,u,1,9],zero,zero,ymm12[u,u,u,2,10],zero,zero,ymm12[u,u,u,19,27],zero,zero,ymm12[u,u,u,20,28],zero,zero
+; AVX512-FCP-NEXT: vpermd %ymm7, %ymm11, %ymm13
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,1,5],zero,zero,ymm13[u,u,u,2,6],zero,zero,ymm13[u,u,u,19,23],zero,zero,ymm13[u,u,u,24,28],zero,zero,ymm13[u,u,u,25]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512-FCP-NEXT: vporq %zmm10, %zmm12, %zmm10
+; AVX512-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm2[1,1,0,0,4,5,6,7]
+; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [0,1,0,1,0,0,0,0]
+; AVX512-FCP-NEXT: vpermd %ymm12, %ymm13, %ymm12
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,1,0]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm9[0,2,0,2]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,0,8],zero,ymm13[u,u,u,u,1,9],zero,ymm13[u,u,u,u,18,26],zero,ymm13[u,u,u,u,19,27],zero,ymm13[u,u,u,u]
+; AVX512-FCP-NEXT: vpermd %ymm9, %ymm11, %ymm11
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,4],zero,ymm11[u,u,u,u,1,5],zero,ymm11[u,u,u,u,2,6],zero,ymm11[u,u,u,u,19,23],zero,ymm11[u,u,u,u,24,28],zero,ymm11[u]
+; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm13, %zmm11
+; AVX512-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm11
+; AVX512-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[3,1,1,3]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[1],zero,zero,ymm7[u,u,u,10,2],zero,zero,ymm7[u,u,u,11,3],zero,zero,ymm7[u,u,u,20,28],zero,zero,ymm7[u,u,u,21,29],zero,zero,ymm7[u]
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[1,3,3,1]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,ymm8[1,9,u,u,u],zero,zero,ymm8[2,10,u,u,u],zero,zero,ymm8[3,19,u,u,u],zero,zero,ymm8[28,20,u,u,u],zero,zero,ymm8[29,21,u]
+; AVX512-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX512-FCP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm2[0,1,2,3,4,5,5,6]
+; AVX512-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [2,2,3,3,2,2,3,3]
+; AVX512-FCP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
+; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,3,1,3]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,1,9],zero,ymm9[u,u,u,u,2,10],zero,ymm9[u,u,u,u,19,27],zero,ymm9[u,u,u,u,20,28],zero,ymm9[u,u,u,u,21]
+; AVX512-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm9
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[12,13,u,u,u],zero,zero,xmm5[14,15,u,u,u]
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,12,13],zero,zero,xmm0[u,u,u,14,15],zero,zero,xmm0[u,u,u]
+; AVX512-FCP-NEXT: vpor %xmm5, %xmm0, %xmm0
+; AVX512-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
+; AVX512-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[13,u,u,u,u],zero,zero,xmm2[14,u,u,u,u],zero,zero,xmm2[15]
+; AVX512-FCP-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX512-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm0
+; AVX512-FCP-NEXT: vmovdqa %xmm1, 96(%rax)
+; AVX512-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
+; AVX512-FCP-NEXT: vmovdqa %ymm0, 64(%rax)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
@@ -2202,77 +2198,76 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm4
-; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm5
-; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm6
-; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm7
-; AVX512DQ-NEXT: vmovdqa (%r8), %xmm0
-; AVX512DQ-NEXT: vmovdqa (%r10), %xmm1
-; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm6, %ymm3
-; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm2
-; AVX512DQ-NEXT: vinserti128 $1, (%r9), %ymm0, %ymm0
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm6[8],xmm7[8],xmm6[9],xmm7[9],xmm6[10],xmm7[10],xmm6[11],xmm7[11],xmm6[12],xmm7[12],xmm6[13],xmm7[13],xmm6[14],xmm7[14],xmm6[15],xmm7[15]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[u,u],zero,zero,xmm6[12,13,u,u,u],zero,zero,xmm6[14,15,u,u,u]
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u,12,13],zero,zero,xmm4[u,u,u,14,15],zero,zero,xmm4[u,u,u]
-; AVX512DQ-NEXT: vpor %xmm6, %xmm4, %xmm4
-; AVX512DQ-NEXT: vextracti128 $1, %ymm0, %xmm5
-; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[10],zero,xmm5[u,u,u,u,13,12],zero,xmm5[u,u,u,u,15,14],zero
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm6 = zero,xmm1[13,u,u,u,u],zero,zero,xmm1[14,u,u,u,u],zero,zero,xmm1[15]
-; AVX512DQ-NEXT: vpor %xmm6, %xmm5, %xmm5
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4, %xmm5
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm4 = ymm2[3,1,1,3]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[1],zero,zero,ymm4[u,u,u,10,2],zero,zero,ymm4[u,u,u,11,3],zero,zero,ymm4[u,u,u,20,28],zero,zero,ymm4[u,u,u,21,29],zero,zero,ymm4[u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm3[1,3,3,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = zero,ymm6[1,9,u,u,u],zero,zero,ymm6[2,10,u,u,u],zero,zero,ymm6[3,19,u,u,u],zero,zero,ymm6[28,20,u,u,u],zero,zero,ymm6[29,21,u]
-; AVX512DQ-NEXT: vpor %ymm4, %ymm6, %ymm4
-; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm6 = xmm1[0,1,2,3,4,5,5,6]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[2,2,3,3]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,1,0,1]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm0[1,3,1,3]
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512DQ-NEXT: vmovdqa (%rdx), %xmm5
+; AVX512DQ-NEXT: vmovdqa (%rcx), %xmm6
+; AVX512DQ-NEXT: vmovdqa (%r8), %xmm3
+; AVX512DQ-NEXT: vmovdqa (%r9), %xmm4
+; AVX512DQ-NEXT: vmovdqa (%r10), %xmm2
+; AVX512DQ-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm8
+; AVX512DQ-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm9
+; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm7
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = ymm9[u,u,u,u,u,5],zero,ymm9[u,u,u,u,u,6],zero,ymm9[u,u,u,u,u],zero,ymm9[23,u,u,u,u,u],zero,ymm9[24,u,u,u,u]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm9[2,3,0,1]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,u],zero,ymm11[5,u,u,u,u,u],zero,ymm11[6,u,u,u,u,u,23],zero,ymm11[u,u,u,u,u,24],zero,ymm11[u,u,u,u]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
+; AVX512DQ-NEXT: vpternlogq $50, %ymm10, %ymm12, %ymm11
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm10 = ymm9[0,2,0,2]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,ymm10[0,8,u,u,u],zero,zero,ymm10[1,9,u,u,u],zero,zero,ymm10[18,26,u,u,u],zero,zero,ymm10[19,27,u,u,u],zero,zero,ymm10[20,28]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm11, %zmm10, %zmm10
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm8[u,u,u,5],zero,ymm8[u,u,u,u,u,6],zero,ymm8[u,u,u,u,u],zero,ymm8[23,u,u,u,u,u],zero,ymm8[24,u,u,u,u,u],zero
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm8[2,3,0,1]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u],zero,ymm13[5,u,u,u,u,u],zero,ymm13[6,u,u,u,u,u,23],zero,ymm13[u,u,u,u,u,24],zero,ymm13[u,u,u,u,u,25]
+; AVX512DQ-NEXT: vpternlogq $200, %ymm11, %ymm12, %ymm13
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm8[0,2,0,2]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,8],zero,zero,ymm11[u,u,u,1,9],zero,zero,ymm11[u,u,u,2,10],zero,zero,ymm11[u,u,u,19,27],zero,zero,ymm11[u,u,u,20,28],zero,zero
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm11
+; AVX512DQ-NEXT: vporq %zmm10, %zmm11, %zmm10
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm7[4],zero,ymm7[u,u,u,u,u,5],zero,ymm7[u,u,u,u,u,6],zero,ymm7[u,u,u,u,u],zero,ymm7[23,u,u,u,u,u],zero,ymm7[24,u,u]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm7[2,3,0,1]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm12 = zero,ymm12[4,u,u,u,u,u],zero,ymm12[5,u,u,u,u,u],zero,ymm12[6,u,u,u,u,u,23],zero,ymm12[u,u,u,u,u,24],zero,ymm12[u,u]
+; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
+; AVX512DQ-NEXT: vpternlogq $200, %ymm11, %ymm13, %ymm12
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm11 = ymm7[0,2,0,2]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[u,u,u,u,0,8],zero,ymm11[u,u,u,u,1,9],zero,ymm11[u,u,u,u,18,26],zero,ymm11[u,u,u,u,19,27],zero,ymm11[u,u,u,u]
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm11, %zmm11
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm12 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,1,0]
+; AVX512DQ-NEXT: vpandn %ymm12, %ymm13, %ymm12
+; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm13 = xmm2[1,1,0,0,4,5,6,7]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm13 = xmm13[0,1,2,0]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,1,0]
+; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
+; AVX512DQ-NEXT: vinserti64x4 $1, %ymm12, %zmm13, %zmm12
+; AVX512DQ-NEXT: vporq %zmm12, %zmm11, %zmm11
+; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm8 = ymm8[3,1,1,3]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[1],zero,zero,ymm8[u,u,u,10,2],zero,zero,ymm8[u,u,u,11,3],zero,zero,ymm8[u,u,u,20,28],zero,zero,ymm8[u,u,u,21,29],zero,zero,ymm8[u]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,3,3,1]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm9[1,9,u,u,u],zero,zero,ymm9[2,10,u,u,u],zero,zero,ymm9[3,19,u,u,u],zero,zero,ymm9[28,20,u,u,u],zero,zero,ymm9[29,21,u]
+; AVX512DQ-NEXT: vpor %ymm8, %ymm9, %ymm8
+; AVX512DQ-NEXT: vpshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,4,5,5,6]
+; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[2,2,3,3]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,1]
+; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm7[1,3,1,3]
; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,9],zero,ymm7[u,u,u,u,2,10],zero,ymm7[u,u,u,u,19,27],zero,ymm7[u,u,u,u,20,28],zero,ymm7[u,u,u,u,21]
-; AVX512DQ-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm7
-; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm7
-; AVX512DQ-NEXT: vinserti32x4 $2, %xmm5, %zmm7, %zmm4
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm3[u,u,u,u,u,5],zero,ymm3[u,u,u,u,u,6],zero,ymm3[u,u,u,u,u],zero,ymm3[23,u,u,u,u,u],zero,ymm3[24,u,u,u,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm3[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u,u,u]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255,255,0,0,255,255,255,255]
-; AVX512DQ-NEXT: vpternlogq $50, %ymm6, %ymm8, %ymm7
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,0,2]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = zero,zero,ymm3[0,8,u,u,u],zero,zero,ymm3[1,9,u,u,u],zero,zero,ymm3[18,26,u,u,u],zero,zero,ymm3[19,27,u,u,u],zero,zero,ymm3[20,28]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm3, %zmm3
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm6 = ymm2[u,u,u,5],zero,ymm2[u,u,u,u,u,6],zero,ymm2[u,u,u,u,u],zero,ymm2[23,u,u,u,u,u],zero,ymm2[24,u,u,u,u,u],zero
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm2[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u,u,u,u,25]
-; AVX512DQ-NEXT: vpternlogq $200, %ymm6, %ymm8, %ymm7
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,0,2]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[0,8],zero,zero,ymm2[u,u,u,1,9],zero,zero,ymm2[u,u,u,2,10],zero,zero,ymm2[u,u,u,19,27],zero,zero,ymm2[u,u,u,20,28],zero,zero
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm2, %zmm2
-; AVX512DQ-NEXT: vporq %zmm3, %zmm2, %zmm2
-; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm3 = xmm1[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,0]
-; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255,255,255,255,255,255,0,255]
-; AVX512DQ-NEXT: vpandn %ymm3, %ymm6, %ymm3
-; AVX512DQ-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[1,1,0,0,4,5,6,7]
-; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,0]
-; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm3 = ymm0[4],zero,ymm0[u,u,u,u,u,5],zero,ymm0[u,u,u,u,u,6],zero,ymm0[u,u,u,u,u],zero,ymm0[23,u,u,u,u,u],zero,ymm0[24,u,u]
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm7 = ymm0[2,3,0,1]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm7 = zero,ymm7[4,u,u,u,u,u],zero,ymm7[5,u,u,u,u,u],zero,ymm7[6,u,u,u,u,u,23],zero,ymm7[u,u,u,u,u,24],zero,ymm7[u,u]
-; AVX512DQ-NEXT: vpternlogq $200, %ymm3, %ymm6, %ymm7
-; AVX512DQ-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,0,2]
-; AVX512DQ-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,0,8],zero,ymm0[u,u,u,u,1,9],zero,ymm0[u,u,u,u,18,26],zero,ymm0[u,u,u,u,19,27],zero,ymm0[u,u,u,u]
-; AVX512DQ-NEXT: vinserti64x4 $1, %ymm7, %zmm0, %zmm0
-; AVX512DQ-NEXT: vporq %zmm1, %zmm0, %zmm0
-; AVX512DQ-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0
-; AVX512DQ-NEXT: vmovdqa %xmm5, 96(%rax)
-; AVX512DQ-NEXT: vmovdqa64 %zmm0, (%rax)
-; AVX512DQ-NEXT: vmovdqa %ymm4, 64(%rax)
+; AVX512DQ-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm7
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm7
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[12,13,u,u,u],zero,zero,xmm5[14,15,u,u,u]
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,12,13],zero,zero,xmm0[u,u,u,14,15],zero,zero,xmm0[u,u,u]
+; AVX512DQ-NEXT: vpor %xmm5, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
+; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[13,u,u,u,u],zero,zero,xmm2[14,u,u,u,u],zero,zero,xmm2[15]
+; AVX512DQ-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512DQ-NEXT: vinserti32x4 $2, %xmm1, %zmm7, %zmm0
+; AVX512DQ-NEXT: vmovdqa %xmm1, 96(%rax)
+; AVX512DQ-NEXT: vmovdqa %ymm0, 64(%rax)
+; AVX512DQ-NEXT: vmovdqa64 %zmm11, (%rax)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -2280,70 +2275,69 @@ define void @store_i8_stride7_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX512DQ-FCP-NEXT: movq {{[0-9]+}}(%rsp), %r10
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm2
-; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm3
-; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm4
-; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm5
-; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm1
-; AVX512DQ-FCP-NEXT: vmovdqa (%r10), %xmm0
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm4, %ymm6
-; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm7
-; AVX512DQ-FCP-NEXT: vinserti128 $1, (%r9), %ymm1, %ymm1
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm0, %zmm1, %zmm1
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm4[8],xmm5[8],xmm4[9],xmm5[9],xmm4[10],xmm5[10],xmm4[11],xmm5[11],xmm4[12],xmm5[12],xmm4[13],xmm5[13],xmm4[14],xmm5[14],xmm4[15],xmm5[15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[u,u],zero,zero,xmm4[12,13,u,u,u],zero,zero,xmm4[14,15,u,u,u]
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,12,13],zero,zero,xmm2[u,u,u,14,15],zero,zero,xmm2[u,u,u]
-; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm2, %xmm2
-; AVX512DQ-FCP-NEXT: vextracti128 $1, %ymm1, %xmm3
-; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[10],zero,xmm3[u,u,u,u,13,12],zero,xmm3[u,u,u,u,15,14],zero
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm4 = zero,xmm0[13,u,u,u,u],zero,zero,xmm0[14,u,u,u,u],zero,zero,xmm0[15]
-; AVX512DQ-FCP-NEXT: vpor %xmm4, %xmm3, %xmm3
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm3
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm2 = ymm7[3,1,1,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[1],zero,zero,ymm2[u,u,u,10,2],zero,zero,ymm2[u,u,u,11,3],zero,zero,ymm2[u,u,u,20,28],zero,zero,ymm2[u,u,u,21,29],zero,zero,ymm2[u]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm6[1,3,3,1]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,ymm4[1,9,u,u,u],zero,zero,ymm4[2,10,u,u,u],zero,zero,ymm4[3,19,u,u,u],zero,zero,ymm4[28,20,u,u,u],zero,zero,ymm4[29,21,u]
-; AVX512DQ-FCP-NEXT: vpor %ymm2, %ymm4, %ymm2
-; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm4 = xmm0[0,1,2,3,4,5,5,6]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [2,2,3,3,2,2,3,3]
-; AVX512DQ-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm5, %ymm4
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm5 = ymm1[1,3,1,3]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,1,9],zero,ymm5[u,u,u,u,2,10],zero,ymm5[u,u,u,u,19,27],zero,ymm5[u,u,u,u,20,28],zero,ymm5[u,u,u,u,21]
-; AVX512DQ-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm5
-; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm5
-; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm3, %zmm5, %zmm2
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm4 = ymm6[0,2,0,2]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm4 = zero,zero,ymm4[0,8,u,u,u],zero,zero,ymm4[1,9,u,u,u],zero,zero,ymm4[18,26,u,u,u],zero,zero,ymm4[19,27,u,u,u],zero,zero,ymm4[20,28]
-; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [1,5,2,6,1,5,2,6]
-; AVX512DQ-FCP-NEXT: # ymm5 = mem[0,1,0,1]
-; AVX512DQ-FCP-NEXT: vpermd %ymm6, %ymm5, %ymm6
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u],zero,zero,ymm6[1,5,u,u,u],zero,zero,ymm6[2,6,u,u,u],zero,zero,ymm6[19,23,u,u,u],zero,zero,ymm6[24,28,u,u,u],zero
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm4, %zmm4
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm7[0,2,0,2]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[0,8],zero,zero,ymm6[u,u,u,1,9],zero,zero,ymm6[u,u,u,2,10],zero,zero,ymm6[u,u,u,19,27],zero,zero,ymm6[u,u,u,20,28],zero,zero
-; AVX512DQ-FCP-NEXT: vpermd %ymm7, %ymm5, %ymm7
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[u,u,u,1,5],zero,zero,ymm7[u,u,u,2,6],zero,zero,ymm7[u,u,u,19,23],zero,zero,ymm7[u,u,u,24,28],zero,zero,ymm7[u,u,u,25]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm7, %zmm6, %zmm6
-; AVX512DQ-FCP-NEXT: vporq %zmm4, %zmm6, %zmm4
-; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm6 = xmm0[1,1,0,0,4,5,6,7]
-; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm7 = [0,1,0,1,0,0,0,0]
-; AVX512DQ-FCP-NEXT: vpermd %ymm6, %ymm7, %ymm6
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,0]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm0, %zmm6, %zmm0
-; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm6 = ymm1[0,2,0,2]
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,0,8],zero,ymm6[u,u,u,u,1,9],zero,ymm6[u,u,u,u,18,26],zero,ymm6[u,u,u,u,19,27],zero,ymm6[u,u,u,u]
-; AVX512DQ-FCP-NEXT: vpermd %ymm1, %ymm5, %ymm1
-; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[0,4],zero,ymm1[u,u,u,u,1,5],zero,ymm1[u,u,u,u,2,6],zero,ymm1[u,u,u,u,19,23],zero,ymm1[u,u,u,u,24,28],zero,ymm1[u]
-; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm6, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1
-; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm4, %zmm1
-; AVX512DQ-FCP-NEXT: vmovdqa %xmm3, 96(%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, (%rax)
-; AVX512DQ-FCP-NEXT: vmovdqa %ymm2, 64(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm1
+; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %xmm5
+; AVX512DQ-FCP-NEXT: vmovdqa (%rcx), %xmm6
+; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %xmm3
+; AVX512DQ-FCP-NEXT: vmovdqa (%r9), %xmm4
+; AVX512DQ-FCP-NEXT: vmovdqa (%r10), %xmm2
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm7
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm5, %ymm8
+; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm9
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm10 = ymm8[0,2,0,2]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm10 = zero,zero,ymm10[0,8,u,u,u],zero,zero,ymm10[1,9,u,u,u],zero,zero,ymm10[18,26,u,u,u],zero,zero,ymm10[19,27,u,u,u],zero,zero,ymm10[20,28]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm11 = [1,5,2,6,1,5,2,6]
+; AVX512DQ-FCP-NEXT: # ymm11 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm11, %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[u,u,u],zero,zero,ymm12[1,5,u,u,u],zero,zero,ymm12[2,6,u,u,u],zero,zero,ymm12[19,23,u,u,u],zero,zero,ymm12[24,28,u,u,u],zero
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm12, %zmm10, %zmm10
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm12 = ymm7[0,2,0,2]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm12 = ymm12[0,8],zero,zero,ymm12[u,u,u,1,9],zero,zero,ymm12[u,u,u,2,10],zero,zero,ymm12[u,u,u,19,27],zero,zero,ymm12[u,u,u,20,28],zero,zero
+; AVX512DQ-FCP-NEXT: vpermd %ymm7, %ymm11, %ymm13
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,1,5],zero,zero,ymm13[u,u,u,2,6],zero,zero,ymm13[u,u,u,19,23],zero,zero,ymm13[u,u,u,24,28],zero,zero,ymm13[u,u,u,25]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512DQ-FCP-NEXT: vporq %zmm10, %zmm12, %zmm10
+; AVX512DQ-FCP-NEXT: vpshuflw {{.*#+}} xmm12 = xmm2[1,1,0,0,4,5,6,7]
+; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm13 = [0,1,0,1,0,0,0,0]
+; AVX512DQ-FCP-NEXT: vpermd %ymm12, %ymm13, %ymm12
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm13 = xmm2[4,5,4,5,4,5,8,9,6,7,6,7,6,7,6,7]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm13[0,0,1,0]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm13, %zmm12, %zmm12
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm13 = ymm9[0,2,0,2]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm13 = ymm13[u,u,u,u,0,8],zero,ymm13[u,u,u,u,1,9],zero,ymm13[u,u,u,u,18,26],zero,ymm13[u,u,u,u,19,27],zero,ymm13[u,u,u,u]
+; AVX512DQ-FCP-NEXT: vpermd %ymm9, %ymm11, %ymm11
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm11 = ymm11[0,4],zero,ymm11[u,u,u,u,1,5],zero,ymm11[u,u,u,u,2,6],zero,ymm11[u,u,u,u,19,23],zero,ymm11[u,u,u,u,24,28],zero,ymm11[u]
+; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm11, %zmm13, %zmm11
+; AVX512DQ-FCP-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm12, %zmm11
+; AVX512DQ-FCP-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm10, %zmm11
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm7 = ymm7[3,1,1,3]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[1],zero,zero,ymm7[u,u,u,10,2],zero,zero,ymm7[u,u,u,11,3],zero,zero,ymm7[u,u,u,20,28],zero,zero,ymm7[u,u,u,21,29],zero,zero,ymm7[u]
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm8[1,3,3,1]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm8 = zero,ymm8[1,9,u,u,u],zero,zero,ymm8[2,10,u,u,u],zero,zero,ymm8[3,19,u,u,u],zero,zero,ymm8[28,20,u,u,u],zero,zero,ymm8[29,21,u]
+; AVX512DQ-FCP-NEXT: vpor %ymm7, %ymm8, %ymm7
+; AVX512DQ-FCP-NEXT: vpshufhw {{.*#+}} xmm8 = xmm2[0,1,2,3,4,5,5,6]
+; AVX512DQ-FCP-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [2,2,3,3,2,2,3,3]
+; AVX512DQ-FCP-NEXT: # ymm10 = mem[0,1,0,1]
+; AVX512DQ-FCP-NEXT: vpermd %ymm8, %ymm10, %ymm8
+; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm9 = ymm9[1,3,1,3]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} ymm9 = ymm9[u,u,u,1,9],zero,ymm9[u,u,u,u,2,10],zero,ymm9[u,u,u,u,19,27],zero,ymm9[u,u,u,u,20,28],zero,ymm9[u,u,u,u,21]
+; AVX512DQ-FCP-NEXT: vpternlogq $244, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm8, %ymm9
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm9
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u],zero,zero,xmm5[12,13,u,u,u],zero,zero,xmm5[14,15,u,u,u]
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,12,13],zero,zero,xmm0[u,u,u,14,15],zero,zero,xmm0[u,u,u]
+; AVX512DQ-FCP-NEXT: vpor %xmm5, %xmm0, %xmm0
+; AVX512DQ-FCP-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm3[8],xmm4[9],xmm3[9],xmm4[10],xmm3[10],xmm4[11],xmm3[11],xmm4[12],xmm3[12],xmm4[13],xmm3[13],xmm4[14],xmm3[14],xmm4[15],xmm3[15]
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[10],zero,xmm1[u,u,u,u,13,12],zero,xmm1[u,u,u,u,15,14],zero
+; AVX512DQ-FCP-NEXT: vpshufb {{.*#+}} xmm2 = zero,xmm2[13,u,u,u,u],zero,zero,xmm2[14,u,u,u,u],zero,zero,xmm2[15]
+; AVX512DQ-FCP-NEXT: vpor %xmm2, %xmm1, %xmm1
+; AVX512DQ-FCP-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX512DQ-FCP-NEXT: vinserti32x4 $2, %xmm1, %zmm9, %zmm0
+; AVX512DQ-FCP-NEXT: vmovdqa %xmm1, 96(%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm11, (%rax)
+; AVX512DQ-FCP-NEXT: vmovdqa %ymm0, 64(%rax)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index 0c76c14..4859a8e 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -305,6 +305,37 @@ define <4 x float> @combine_vpermilvar_4f32_as_insertps(<4 x float> %a0) {
ret <4 x float> %2
}
+define <8 x i32> @combine_blend_of_permutes_v8i32(<4 x i64> %a0, <4 x i64> %a1) {
+; AVX1-LABEL: combine_blend_of_permutes_v8i32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6],ymm1[7]
+; AVX1-NEXT: ret{{[l|q]}}
+;
+; AVX2-LABEL: combine_blend_of_permutes_v8i32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,3,0,1]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3,4],ymm0[5,6],ymm1[7]
+; AVX2-NEXT: ret{{[l|q]}}
+;
+; AVX512-LABEL: combine_blend_of_permutes_v8i32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
+; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm2 = [4,21,6,23,16,1,2,19]
+; AVX512-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
+; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512-NEXT: ret{{[l|q]}}
+ %s0 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+ %s1 = shufflevector <4 x i64> %a1, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
+ %x0 = bitcast <4 x i64> %s0 to <8 x i32>
+ %x1 = bitcast <4 x i64> %s1 to <8 x i32>
+ %r = shufflevector <8 x i32> %x0, <8 x i32> %x1, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 12, i32 5, i32 6, i32 15>
+ ret <8 x i32> %r
+}
+
define <2 x double> @constant_fold_vpermilvar_pd() {
; CHECK-LABEL: constant_fold_vpermilvar_pd:
; CHECK: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
index f53b1ee..e87e810 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
@@ -973,3 +973,47 @@ define <8 x i64> @combine_vpermvar_insertion_as_broadcast_v8i64(i64 %a0) {
%2 = call <8 x i64> @llvm.x86.avx512.permvar.di.512(<8 x i64> %1, <8 x i64> zeroinitializer)
ret <8 x i64> %2
}
+
+define <16 x i32> @blend_of_permutes_v16i32(<8 x i64> %a0, <8 x i64> %a1) {
+; X86-AVX512F-LABEL: blend_of_permutes_v16i32:
+; X86-AVX512F: # %bb.0:
+; X86-AVX512F-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5]
+; X86-AVX512F-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,0,1,6,7,4,5]
+; X86-AVX512F-NEXT: movw $-25958, %ax # imm = 0x9A9A
+; X86-AVX512F-NEXT: kmovw %eax, %k1
+; X86-AVX512F-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; X86-AVX512F-NEXT: retl
+;
+; X86-AVX512BW-LABEL: blend_of_permutes_v16i32:
+; X86-AVX512BW: # %bb.0:
+; X86-AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5]
+; X86-AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,0,1,6,7,4,5]
+; X86-AVX512BW-NEXT: movw $-25958, %ax # imm = 0x9A9A
+; X86-AVX512BW-NEXT: kmovd %eax, %k1
+; X86-AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; X86-AVX512BW-NEXT: retl
+;
+; X64-AVX512F-LABEL: blend_of_permutes_v16i32:
+; X64-AVX512F: # %bb.0:
+; X64-AVX512F-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5]
+; X64-AVX512F-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,0,1,6,7,4,5]
+; X64-AVX512F-NEXT: movw $-25958, %ax # imm = 0x9A9A
+; X64-AVX512F-NEXT: kmovw %eax, %k1
+; X64-AVX512F-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; X64-AVX512F-NEXT: retq
+;
+; X64-AVX512BW-LABEL: blend_of_permutes_v16i32:
+; X64-AVX512BW: # %bb.0:
+; X64-AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[2,3,0,1,6,7,4,5]
+; X64-AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[2,3,0,1,6,7,4,5]
+; X64-AVX512BW-NEXT: movw $-25958, %ax # imm = 0x9A9A
+; X64-AVX512BW-NEXT: kmovd %eax, %k1
+; X64-AVX512BW-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
+; X64-AVX512BW-NEXT: retq
+ %s0 = shufflevector <8 x i64> %a0, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
+ %s1 = shufflevector <8 x i64> %a1, <8 x i64> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
+ %x0 = bitcast <8 x i64> %s0 to <16 x i32>
+ %x1 = bitcast <8 x i64> %s1 to <16 x i32>
+ %r = shufflevector <16 x i32> %x0, <16 x i32> %x1, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 20, i32 5, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 28, i32 13, i32 14, i32 31>
+ ret <16 x i32> %r
+}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
index 5eb017b..33851f5 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-sse41.ll
@@ -22,6 +22,21 @@ define <16 x i8> @combine_vpshufb_as_movzx(<16 x i8> %a0) {
ret <16 x i8> %res0
}
+define <4 x i32> @combine_blend_of_permutes_v4i32(<2 x i64> %a0, <2 x i64> %a1) {
+; SSE-LABEL: combine_blend_of_permutes_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT: retq
+ %s0 = shufflevector <2 x i64> %a0, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+ %s1 = shufflevector <2 x i64> %a1, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
+ %x0 = bitcast <2 x i64> %s0 to <4 x i32>
+ %x1 = bitcast <2 x i64> %s1 to <4 x i32>
+ %r = shufflevector <4 x i32> %x0, <4 x i32> %x1, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+ ret <4 x i32> %r
+}
+
define <16 x i8> @PR50049(ptr %p1, ptr %p2) {
; SSE-LABEL: PR50049:
; SSE: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
index 11f422d..99e8cdb 100644
--- a/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
+++ b/llvm/test/CodeGen/X86/zero_extend_vector_inreg_of_broadcast.ll
@@ -314,8 +314,8 @@ define void @vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2(ptr %in.v
;
; AVX512F-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
@@ -324,8 +324,8 @@ define void @vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2(ptr %in.v
;
; AVX512DQ-LABEL: vec64_i16_widen_to_i32_factor2_broadcast_to_v2i32_factor2:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512DQ-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,10,11,0,1,14,15,u,u,u,u,u,u,u,u]
; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx)
@@ -981,7 +981,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; AVX512F-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,5,0,7]
; AVX512F-NEXT: vmovdqa (%rdi), %ymm1
; AVX512F-NEXT: vpaddb (%rsi), %ymm1, %ymm1
-; AVX512F-NEXT: vpermd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpermd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512F-NEXT: vzeroupper
@@ -992,7 +992,7 @@ define void @vec128_i32_widen_to_i64_factor2_broadcast_to_v2i64_factor2(ptr %in.
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} xmm0 = [0,5,0,7]
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm1
; AVX512DQ-NEXT: vpaddb (%rsi), %ymm1, %ymm1
-; AVX512DQ-NEXT: vpermd %zmm1, %zmm0, %zmm0
+; AVX512DQ-NEXT: vpermd %ymm1, %ymm0, %ymm0
; AVX512DQ-NEXT: vpaddb (%rdx), %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa %ymm0, (%rcx)
; AVX512DQ-NEXT: vzeroupper
@@ -4026,10 +4026,10 @@ define void @vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6(ptr %in.
;
; AVX512F-FAST-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6:
; AVX512F-FAST: # %bb.0:
-; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-FAST-NEXT: vmovdqa 48(%rdi), %xmm1
; AVX512F-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
+; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-FAST-NEXT: vpbroadcastq %xmm0, %ymm2
; AVX512F-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
; AVX512F-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
@@ -4062,10 +4062,10 @@ define void @vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6(ptr %in.
;
; AVX512DQ-FAST-LABEL: vec384_i16_widen_to_i64_factor4_broadcast_to_v6i64_factor6:
; AVX512DQ-FAST: # %bb.0:
-; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQ-FAST-NEXT: vmovdqa 48(%rdi), %xmm1
; AVX512DQ-FAST-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
+; AVX512DQ-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512DQ-FAST-NEXT: vpbroadcastq %xmm0, %ymm2
; AVX512DQ-FAST-NEXT: vpblendw {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7],ymm2[8],ymm1[9,10,11],ymm2[12],ymm1[13,14,15]
; AVX512DQ-FAST-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
@@ -4541,9 +4541,9 @@ define void @vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2(ptr %
;
; AVX512F-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512F-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX512F-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX512F-NEXT: vpbroadcastw %xmm0, %ymm0
@@ -4559,9 +4559,9 @@ define void @vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2(ptr %
;
; AVX512DQ-LABEL: vec384_i16_widen_to_i192_factor12_broadcast_to_v2i192_factor2:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm0
-; AVX512DQ-NEXT: vpaddb (%rsi), %ymm0, %ymm0
+; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0
; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm1
+; AVX512DQ-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512DQ-NEXT: vpaddb 48(%rsi), %xmm1, %xmm1
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; AVX512DQ-NEXT: vpbroadcastw %xmm0, %ymm0
diff --git a/llvm/test/MC/LoongArch/Relocations/relax-addsub.s b/llvm/test/MC/LoongArch/Relocations/relax-addsub.s
index 18e0ede..0e27d63 100644
--- a/llvm/test/MC/LoongArch/Relocations/relax-addsub.s
+++ b/llvm/test/MC/LoongArch/Relocations/relax-addsub.s
@@ -28,7 +28,7 @@
# RELAX: Relocations [
# RELAX-NEXT: Section ({{.*}}) .rela.text {
-# RELAX-NEXT: 0x4 R_LARCH_ALIGN {{.*}} 0x4
+# RELAX-NEXT: 0x4 R_LARCH_ALIGN .text 0x4
# RELAX-NEXT: 0x10 R_LARCH_PCALA_HI20 .L1 0x0
# RELAX-NEXT: 0x10 R_LARCH_RELAX - 0x0
# RELAX-NEXT: 0x14 R_LARCH_PCALA_LO12 .L1 0x0
diff --git a/llvm/test/MC/LoongArch/Relocations/relax-align.s b/llvm/test/MC/LoongArch/Relocations/relax-align.s
index 294fd9f..0246d5b 100644
--- a/llvm/test/MC/LoongArch/Relocations/relax-align.s
+++ b/llvm/test/MC/LoongArch/Relocations/relax-align.s
@@ -63,17 +63,19 @@ ret
## Test that the symbol index is different from .text.
.section .text2, "ax"
.p2align 4
+.p2align 4, , 4
break 7
# RELOC: Relocations [
# RELAX-RELOC-NEXT: Section ({{.*}}) .rela.text {
-# RELAX-RELOC-NEXT: 0x24 R_LARCH_ALIGN .Lla-relax-align0 0x4
-# RELAX-RELOC-NEXT: 0x34 R_LARCH_ALIGN .Lla-relax-align0 0x5
-# RELAX-RELOC-NEXT: 0x50 R_LARCH_ALIGN .Lla-relax-align0 0x4
-# RELAX-RELOC-NEXT: 0x60 R_LARCH_ALIGN .Lla-relax-align0 0xB04
-# RELAX-RELOC-NEXT: 0x70 R_LARCH_ALIGN .Lla-relax-align0 0x4
+# RELAX-RELOC-NEXT: 0x24 R_LARCH_ALIGN .text 0x4
+# RELAX-RELOC-NEXT: 0x34 R_LARCH_ALIGN .text 0x5
+# RELAX-RELOC-NEXT: 0x50 R_LARCH_ALIGN .text 0x4
+# RELAX-RELOC-NEXT: 0x60 R_LARCH_ALIGN .text 0xB04
+# RELAX-RELOC-NEXT: 0x70 R_LARCH_ALIGN .text 0x4
# RELAX-RELOC-NEXT: }
# RELAX-RELOC-NEXT: Section ({{.*}}) .rela.text2 {
-# RELAX-RELOC-NEXT: 0x0 R_LARCH_ALIGN .Lla-relax-align1 0x4
+# RELAX-RELOC-NEXT: 0x0 R_LARCH_ALIGN .text2 0x4
+# RELAX-RELOC-NEXT: 0xC R_LARCH_ALIGN .text2 0x404
# RELAX-RELOC-NEXT: }
# RELOC-NEXT: ]
diff --git a/llvm/test/Other/lint.ll b/llvm/test/Other/lint.ll
index 6b31b31..6fd2d40c 100644
--- a/llvm/test/Other/lint.ll
+++ b/llvm/test/Other/lint.ll
@@ -124,13 +124,6 @@ define void @0() nounwind {
ret void
}
-; CHECK: va_start called in a non-varargs function
-declare void @llvm.va_start(ptr)
-define void @not_vararg(ptr %p) nounwind {
- call void @llvm.va_start(ptr %p)
- ret void
-}
-
; CHECK: Undefined behavior: Branch to non-blockaddress
define void @use_indbr() {
indirectbr ptr @foo, [label %block]
diff --git a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
index ae2e115..bd1a47bb 100644
--- a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
@@ -2581,3 +2581,92 @@ entry:
%val = call i8 @llvm.umin.i8(i8 %sub, i8 3)
ret i8 %val
}
+
+define i8 @test_umax_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_umax_and(
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.umax.i8(i8 [[X1:%.*]], i8 [[Y1:%.*]])
+; CHECK-NEXT: [[RES1:%.*]] = and i8 [[RES]], -64
+; CHECK-NEXT: ret i8 [[RES1]]
+;
+ %x1 = and i8 %x, -64
+ %y1 = and i8 %y, -64
+ %res = call i8 @llvm.umax.i8(i8 %x1, i8 %y1)
+ ret i8 %res
+}
+
+define i8 @test_umin_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_umin_and(
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.umin.i8(i8 [[X1:%.*]], i8 [[Y1:%.*]])
+; CHECK-NEXT: [[RES1:%.*]] = and i8 [[RES]], -64
+; CHECK-NEXT: ret i8 [[RES1]]
+;
+ %x1 = and i8 %x, -64
+ %y1 = and i8 %y, -64
+ %res = call i8 @llvm.umin.i8(i8 %x1, i8 %y1)
+ ret i8 %res
+}
+
+define i8 @test_smax_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smax_and(
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.smax.i8(i8 [[X1:%.*]], i8 [[Y1:%.*]])
+; CHECK-NEXT: [[RES1:%.*]] = and i8 [[RES]], -64
+; CHECK-NEXT: ret i8 [[RES1]]
+;
+ %x1 = and i8 %x, -64
+ %y1 = and i8 %y, -64
+ %res = call i8 @llvm.smax.i8(i8 %x1, i8 %y1)
+ ret i8 %res
+}
+
+define i8 @test_smin_and(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smin_and(
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1:%.*]], i8 [[Y1:%.*]])
+; CHECK-NEXT: [[RES1:%.*]] = and i8 [[RES]], -64
+; CHECK-NEXT: ret i8 [[RES1]]
+;
+ %x1 = and i8 %x, -64
+ %y1 = and i8 %y, -64
+ %res = call i8 @llvm.smin.i8(i8 %x1, i8 %y1)
+ ret i8 %res
+}
+
+define i8 @test_smin_and_mismatch(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smin_and_mismatch(
+; CHECK-NEXT: [[X1:%.*]] = and i8 [[X:%.*]], -64
+; CHECK-NEXT: [[Y1:%.*]] = and i8 [[Y:%.*]], -32
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %x1 = and i8 %x, -64
+ %y1 = and i8 %y, -32
+ %res = call i8 @llvm.smin.i8(i8 %x1, i8 %y1)
+ ret i8 %res
+}
+
+define i8 @test_smin_and_non_negated_pow2(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smin_and_non_negated_pow2(
+; CHECK-NEXT: [[X1:%.*]] = and i8 [[X:%.*]], 31
+; CHECK-NEXT: [[Y1:%.*]] = and i8 [[Y:%.*]], 31
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %x1 = and i8 %x, 31
+ %y1 = and i8 %y, 31
+ %res = call i8 @llvm.smin.i8(i8 %x1, i8 %y1)
+ ret i8 %res
+}
+
+define i8 @test_smin_and_multiuse(i8 %x, i8 %y) {
+; CHECK-LABEL: @test_smin_and_multiuse(
+; CHECK-NEXT: [[X1:%.*]] = and i8 [[X:%.*]], 31
+; CHECK-NEXT: [[Y1:%.*]] = and i8 [[Y:%.*]], 31
+; CHECK-NEXT: call void @use(i8 [[Y1]])
+; CHECK-NEXT: [[RES:%.*]] = call i8 @llvm.smin.i8(i8 [[X1]], i8 [[Y1]])
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %x1 = and i8 %x, 31
+ %y1 = and i8 %y, 31
+ call void @use(i8 %y1)
+ %res = call i8 @llvm.smin.i8(i8 %x1, i8 %y1)
+ ret i8 %res
+}
diff --git a/llvm/test/Transforms/JumpThreading/pr79175.ll b/llvm/test/Transforms/JumpThreading/pr79175.ll
index 2c7ee07..cce30ce 100644
--- a/llvm/test/Transforms/JumpThreading/pr79175.ll
+++ b/llvm/test/Transforms/JumpThreading/pr79175.ll
@@ -17,11 +17,11 @@ define i32 @test(i64 %idx, i32 %val) {
; CHECK: cond.end:
; CHECK-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[VAL]], 0
; CHECK-NEXT: [[COND_FR:%.*]] = freeze i1 [[CMP_I]]
-; CHECK-NEXT: br i1 [[COND_FR]], label [[COND_END_THREAD]], label [[TMP0:%.*]]
-; CHECK: cond.end.thread:
-; CHECK-NEXT: br label [[TMP0]]
+; CHECK-NEXT: br i1 [[COND_FR]], label [[TMP0:%.*]], label [[COND_END_THREAD]]
; CHECK: 0:
-; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ 0, [[COND_END_THREAD]] ], [ [[VAL]], [[COND_END]] ]
+; CHECK-NEXT: br label [[COND_END_THREAD]]
+; CHECK: cond.end.thread:
+; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ [[VAL]], [[COND_END]] ], [ 0, [[TMP0]] ], [ 0, [[FOR_BODY]] ]
; CHECK-NEXT: [[F_IDX:%.*]] = getelementptr inbounds i32, ptr @f, i64 [[IDX]]
; CHECK-NEXT: store i32 [[TMP1]], ptr [[F_IDX]], align 4
; CHECK-NEXT: [[F_RELOAD:%.*]] = load i32, ptr @f, align 4
diff --git a/llvm/test/Transforms/JumpThreading/select.ll b/llvm/test/Transforms/JumpThreading/select.ll
index 4ec55a6..27ebf4c 100644
--- a/llvm/test/Transforms/JumpThreading/select.ll
+++ b/llvm/test/Transforms/JumpThreading/select.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt -S -passes="jump-threading" -debug-only=branch-prob < %s 2>&1 | FileCheck %s
-; RUN: opt -S -passes="require<branch-prob>,jump-threading" -debug-only=branch-prob < %s 2>&1 | FileCheck -check-prefixes=CHECK,CHECK-BPI %s
+; RUN: opt -S -passes="require<branch-prob>,jump-threading" -debug-only=branch-prob -disable-output < %s 2>&1 | FileCheck -check-prefix=CHECK-BPI %s
; REQUIRES: asserts
; CHECK-BPI-LABEL: ---- Branch Probability Info : unfold1 ----
@@ -21,7 +21,7 @@ declare void @quux()
; booleans where at least one operand is true/false/undef.
;.
-; CHECK: @[[ANCHOR:[a-zA-Z0-9_$"\\.-]+]] = constant [3 x ptr] [ptr blockaddress(@test_indirectbr, [[L1:%.*]]), ptr inttoptr (i32 1 to ptr), ptr blockaddress(@test_indirectbr, [[L3:%.*]])]
+; CHECK: @anchor = constant [3 x ptr] [ptr blockaddress(@test_indirectbr, %L1), ptr inttoptr (i32 1 to ptr), ptr blockaddress(@test_indirectbr, %L3)]
;.
define void @test_br(i1 %cond, i1 %value) nounwind {
; CHECK-LABEL: @test_br(
@@ -66,8 +66,8 @@ define void @test_switch(i1 %cond, i8 %value) nounwind {
; CHECK-NEXT: call void @quux()
; CHECK-NEXT: [[EXPR:%.*]] = select i1 [[COND]], i8 1, i8 [[VALUE:%.*]]
; CHECK-NEXT: switch i8 [[EXPR]], label [[L3:%.*]] [
-; CHECK-NEXT: i8 1, label [[L1]]
-; CHECK-NEXT: i8 2, label [[L2:%.*]]
+; CHECK-NEXT: i8 1, label [[L1]]
+; CHECK-NEXT: i8 2, label [[L2:%.*]]
; CHECK-NEXT: ]
; CHECK: L1:
; CHECK-NEXT: call void @foo()
@@ -192,8 +192,8 @@ define void @test_switch_cmp(i1 %cond, i32 %val, i8 %value) nounwind {
; CHECK: 0:
; CHECK-NEXT: [[TMP1:%.*]] = phi i8 [ [[VALUE:%.*]], [[L0]] ]
; CHECK-NEXT: switch i8 [[TMP1]], label [[L3:%.*]] [
-; CHECK-NEXT: i8 1, label [[L1]]
-; CHECK-NEXT: i8 2, label [[L2:%.*]]
+; CHECK-NEXT: i8 1, label [[L1]]
+; CHECK-NEXT: i8 2, label [[L2:%.*]]
; CHECK-NEXT: ]
; CHECK: L1:
; CHECK-NEXT: call void @foo()
@@ -237,8 +237,8 @@ define void @test_switch_default(ptr nocapture %status) nounwind {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[STATUS:%.*]], align 4
; CHECK-NEXT: switch i32 [[TMP0]], label [[L2:%.*]] [
-; CHECK-NEXT: i32 5061, label [[L2_THREAD:%.*]]
-; CHECK-NEXT: i32 0, label [[L2]]
+; CHECK-NEXT: i32 5061, label [[L2_THREAD:%.*]]
+; CHECK-NEXT: i32 0, label [[L2]]
; CHECK-NEXT: ]
; CHECK: L2.thread:
; CHECK-NEXT: store i32 10025, ptr [[STATUS]], align 4
@@ -377,21 +377,21 @@ define i32 @unfold3(i32 %u, i32 %v, i32 %w, i32 %x, i32 %y, i32 %z, i32 %j) noun
; CHECK-NEXT: br i1 [[CMP_I]], label [[DOTEXIT_THREAD4:%.*]], label [[COND_FALSE_I:%.*]]
; CHECK: cond.false.i:
; CHECK-NEXT: [[CMP4_I:%.*]] = icmp sgt i32 [[U]], [[V]]
-; CHECK-NEXT: br i1 [[CMP4_I]], label [[DOTEXIT_THREAD:%.*]], label [[COND_FALSE_6_I:%.*]]
+; CHECK-NEXT: br i1 [[CMP4_I]], label [[DOTEXIT_THREAD4]], label [[COND_FALSE_6_I:%.*]]
; CHECK: cond.false.6.i:
; CHECK-NEXT: [[CMP8_I:%.*]] = icmp slt i32 [[W:%.*]], [[X:%.*]]
; CHECK-NEXT: br i1 [[CMP8_I]], label [[DOTEXIT_THREAD4]], label [[COND_FALSE_10_I:%.*]]
; CHECK: cond.false.10.i:
; CHECK-NEXT: [[CMP13_I:%.*]] = icmp sgt i32 [[W]], [[X]]
-; CHECK-NEXT: br i1 [[CMP13_I]], label [[DOTEXIT_THREAD]], label [[DOTEXIT:%.*]]
+; CHECK-NEXT: br i1 [[CMP13_I]], label [[DOTEXIT_THREAD4]], label [[DOTEXIT:%.*]]
; CHECK: .exit:
; CHECK-NEXT: [[PHITMP:%.*]] = icmp sge i32 [[Y:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[COND_FR:%.*]] = freeze i1 [[PHITMP]]
-; CHECK-NEXT: br i1 [[COND_FR]], label [[DOTEXIT_THREAD]], label [[DOTEXIT_THREAD4]]
-; CHECK: .exit.thread:
+; CHECK-NEXT: br i1 [[COND_FR]], label [[DOTEXIT_THREAD:%.*]], label [[DOTEXIT_THREAD4]]
+; CHECK: 0:
; CHECK-NEXT: br label [[DOTEXIT_THREAD4]]
-; CHECK: .exit.thread4:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[J]], [[DOTEXIT_THREAD]] ], [ [[ADD3]], [[DOTEXIT]] ], [ [[ADD3]], [[ENTRY:%.*]] ], [ [[ADD3]], [[COND_FALSE_6_I]] ]
+; CHECK: .exit.thread:
+; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[ADD3]], [[DOTEXIT]] ], [ [[J]], [[DOTEXIT_THREAD]] ], [ [[J]], [[COND_FALSE_I]] ], [ [[J]], [[COND_FALSE_10_I]] ], [ [[ADD3]], [[ENTRY:%.*]] ], [ [[ADD3]], [[COND_FALSE_6_I]] ]
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
@@ -430,23 +430,23 @@ define i32 @unfold4(i32 %u, i32 %v, i32 %w, i32 %x, i32 %y, i32 %z, i32 %j) noun
; CHECK-NEXT: br i1 [[CMP_I]], label [[DOTEXIT_THREAD:%.*]], label [[COND_FALSE_I:%.*]]
; CHECK: cond.false.i:
; CHECK-NEXT: [[CMP4_I:%.*]] = icmp sgt i32 [[U]], [[V]]
-; CHECK-NEXT: br i1 [[CMP4_I]], label [[DOTEXIT_THREAD5:%.*]], label [[COND_FALSE_6_I:%.*]]
+; CHECK-NEXT: br i1 [[CMP4_I]], label [[DOTEXIT_THREAD]], label [[COND_FALSE_6_I:%.*]]
; CHECK: cond.false.6.i:
; CHECK-NEXT: [[CMP8_I:%.*]] = icmp slt i32 [[W:%.*]], [[X:%.*]]
; CHECK-NEXT: br i1 [[CMP8_I]], label [[DOTEXIT_THREAD]], label [[COND_FALSE_10_I:%.*]]
; CHECK: cond.false.10.i:
; CHECK-NEXT: [[CMP13_I:%.*]] = icmp sgt i32 [[W]], [[X]]
-; CHECK-NEXT: br i1 [[CMP13_I]], label [[DOTEXIT_THREAD5]], label [[DOTEXIT:%.*]]
+; CHECK-NEXT: br i1 [[CMP13_I]], label [[DOTEXIT_THREAD]], label [[DOTEXIT:%.*]]
; CHECK: .exit:
; CHECK-NEXT: [[CMP19_I:%.*]] = icmp sge i32 [[Y:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[CONV:%.*]] = zext i1 [[CMP19_I]] to i32
; CHECK-NEXT: [[LNOT_I18:%.*]] = icmp eq i32 [[CONV]], 1
; CHECK-NEXT: [[COND_FR:%.*]] = freeze i1 [[LNOT_I18]]
-; CHECK-NEXT: br i1 [[COND_FR]], label [[DOTEXIT_THREAD]], label [[DOTEXIT_THREAD5]]
+; CHECK-NEXT: br i1 [[COND_FR]], label [[TMP1:%.*]], label [[DOTEXIT_THREAD]]
+; CHECK: 0:
+; CHECK-NEXT: br label [[DOTEXIT_THREAD]]
; CHECK: .exit.thread:
-; CHECK-NEXT: br label [[DOTEXIT_THREAD5]]
-; CHECK: .exit.thread5:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[J]], [[DOTEXIT_THREAD]] ], [ [[ADD3]], [[DOTEXIT]] ], [ [[ADD3]], [[COND_FALSE_I]] ], [ [[ADD3]], [[COND_FALSE_10_I]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[ADD3]], [[DOTEXIT]] ], [ [[J]], [[TMP1]] ], [ [[J]], [[ENTRY:%.*]] ], [ [[J]], [[COND_FALSE_6_I]] ], [ [[ADD3]], [[COND_FALSE_I]] ], [ [[ADD3]], [[COND_FALSE_10_I]] ]
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
@@ -560,10 +560,10 @@ define void @test_func(ptr nocapture readonly %a, ptr nocapture readonly %b, ptr
; CHECK: if.end:
; CHECK-NEXT: [[LOCAL_VAR_0:%.*]] = phi i32 [ [[TMP1]], [[FOR_BODY]] ]
; CHECK-NEXT: switch i32 [[LOCAL_VAR_0]], label [[SW_DEFAULT]] [
-; CHECK-NEXT: i32 2, label [[SW_BB]]
-; CHECK-NEXT: i32 4, label [[SW_BB7]]
-; CHECK-NEXT: i32 5, label [[SW_BB8:%.*]]
-; CHECK-NEXT: i32 7, label [[SW_BB9:%.*]]
+; CHECK-NEXT: i32 2, label [[SW_BB]]
+; CHECK-NEXT: i32 4, label [[SW_BB7]]
+; CHECK-NEXT: i32 5, label [[SW_BB8:%.*]]
+; CHECK-NEXT: i32 7, label [[SW_BB9:%.*]]
; CHECK-NEXT: ]
; CHECK: sw.bb:
; CHECK-NEXT: call void @foo()
@@ -674,3 +674,5 @@ if.end:
; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1984}
; CHECK: [[PROF1]] = !{!"branch_weights", i64 1073741824, i64 3221225472}
;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK-BPI: {{.*}}
diff --git a/llvm/test/Transforms/JumpThreading/thread-prob-7.ll b/llvm/test/Transforms/JumpThreading/thread-prob-7.ll
index 8c9d898..4623a57 100644
--- a/llvm/test/Transforms/JumpThreading/thread-prob-7.ll
+++ b/llvm/test/Transforms/JumpThreading/thread-prob-7.ll
@@ -14,15 +14,15 @@ define i32 @func0(i32 %a0, i32 %a1) !prof !0 {
; CHECK-NEXT: br i1 [[CMP1]], label [[BB_JOIN_THREAD:%.*]], label [[TEST2_FALSE:%.*]], !prof [[PROF2:![0-9]+]]
; CHECK: test2_false:
; CHECK-NEXT: call void @foobar()
-; CHECK-NEXT: br label [[TMP0:%.*]]
+; CHECK-NEXT: br label [[BB_JOIN_THREAD]]
; CHECK: bb_join:
; CHECK-NEXT: [[C:%.*]] = phi i1 [ [[CX]], [[ENTRY:%.*]] ]
; CHECK-NEXT: [[COND_FR:%.*]] = freeze i1 [[C]]
-; CHECK-NEXT: br i1 [[COND_FR]], label [[BB_JOIN_THREAD]], label [[TMP0]], !prof [[PROF3:![0-9]+]]
+; CHECK-NEXT: br i1 [[COND_FR]], label [[BB_JOIN_THREAD1:%.*]], label [[BB_JOIN_THREAD]], !prof [[PROF3:![0-9]+]]
; CHECK: bb_join.thread:
-; CHECK-NEXT: br label [[TMP0]]
+; CHECK-NEXT: br label [[BB_JOIN_THREAD]]
; CHECK: 0:
-; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ 42, [[BB_JOIN_THREAD]] ], [ 7, [[BB_JOIN]] ], [ 7, [[TEST2_FALSE]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = phi i32 [ 7, [[BB_JOIN]] ], [ 7, [[TEST2_FALSE]] ], [ 42, [[TEST2]] ], [ 42, [[BB_JOIN_THREAD1]] ]
; CHECK-NEXT: ret i32 [[TMP1]]
;
entry:
diff --git a/llvm/test/Transforms/JumpThreading/uncond-no-phi.ll b/llvm/test/Transforms/JumpThreading/uncond-no-phi.ll
new file mode 100644
index 0000000..6104e8f
--- /dev/null
+++ b/llvm/test/Transforms/JumpThreading/uncond-no-phi.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=jump-threading -S < %s | FileCheck %s
+
+define i1 @if_else(i1 %c, i1 %c1) {
+; CHECK-LABEL: define i1 @if_else(
+; CHECK-SAME: i1 [[C:%.*]], i1 [[C1:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C]], label [[THEN:%.*]], label [[RETURN:%.*]]
+; CHECK: then:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: br i1 [[C1]], label [[ELSE:%.*]], label [[RETURN]]
+; CHECK: else:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i1 [ false, [[THEN]] ], [ true, [[ENTRY:%.*]] ], [ true, [[ELSE]] ]
+; CHECK-NEXT: ret i1 [[RETVAL_0]]
+;
+entry:
+ br i1 %c, label %then, label %else
+
+then:
+ call void @dummy()
+ br i1 %c1, label %else, label %return
+
+else:
+ br label %return
+
+return:
+ %retval.0 = phi i1 [ true, %else ], [ false, %then ]
+ ret i1 %retval.0
+}
+
+define i8 @switch_uncond(i8 %arg) {
+; CHECK-LABEL: define i8 @switch_uncond(
+; CHECK-SAME: i8 [[ARG:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: switch i8 [[ARG]], label [[DEFAULT:%.*]] [
+; CHECK-NEXT: i8 0, label [[BB1:%.*]]
+; CHECK-NEXT: i8 1, label [[BB3:%.*]]
+; CHECK-NEXT: i8 2, label [[BB2:%.*]]
+; CHECK-NEXT: i8 3, label [[END:%.*]]
+; CHECK-NEXT: ]
+; CHECK: default:
+; CHECK-NEXT: unreachable
+; CHECK: bb:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: br label [[END]]
+; CHECK: bb1:
+; CHECK-NEXT: call void @dummy()
+; CHECK-NEXT: br label [[END]]
+; CHECK: bb2:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[PHI:%.*]] = phi i8 [ 1, [[ENTRY:%.*]] ], [ 0, [[BB3]] ], [ 0, [[BB1]] ], [ 0, [[BB2]] ]
+; CHECK-NEXT: ret i8 [[PHI]]
+;
+entry:
+ switch i8 %arg, label %default [
+ i8 0, label %bb
+ i8 1, label %bb1
+ i8 2, label %bb2
+ i8 3, label %end
+ ]
+
+default:
+ unreachable
+
+bb:
+ call void @dummy()
+ br label %bb2
+
+bb1:
+ call void @dummy()
+ br label %bb2
+
+; Predecessors of %bb2 are %bb and %bb1, which are not identical.
+; So we can thread %bb2.
+bb2:
+ br label %end
+
+end:
+ %phi = phi i8 [ 0, %bb2 ], [ 1, %entry ]
+ ret i8 %phi
+}
+
+define i8 @switch_uncond_fail(i8 %arg) {
+; CHECK-LABEL: define i8 @switch_uncond_fail(
+; CHECK-SAME: i8 [[ARG:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: switch i8 [[ARG]], label [[DEFAULT:%.*]] [
+; CHECK-NEXT: i8 0, label [[BB:%.*]]
+; CHECK-NEXT: i8 1, label [[BB]]
+; CHECK-NEXT: i8 2, label [[END:%.*]]
+; CHECK-NEXT: ]
+; CHECK: default:
+; CHECK-NEXT: br label [[END]]
+; CHECK: bb:
+; CHECK-NEXT: br label [[END]]
+; CHECK: end:
+; CHECK-NEXT: [[PHI:%.*]] = phi i8 [ 0, [[BB]] ], [ 1, [[ENTRY:%.*]] ], [ 2, [[DEFAULT]] ]
+; CHECK-NEXT: ret i8 [[PHI]]
+;
+entry:
+ switch i8 %arg, label %default [
+ i8 0, label %bb
+ i8 1, label %bb
+ i8 2, label %end
+ ]
+
+default:
+ br label %end
+
+; The only predecessor of %bb is %entry (though it appears twice in the
+; predecessor list), thus %bb is unthreadable.
+bb:
+ br label %end
+
+end:
+ %phi = phi i8 [ 0, %bb ], [ 1, %entry ], [ 2, %default ]
+ ret i8 %phi
+}
+
+declare void @dummy()
diff --git a/llvm/test/Transforms/PhaseOrdering/thread-uncond-bb.ll b/llvm/test/Transforms/PhaseOrdering/thread-uncond-bb.ll
new file mode 100644
index 0000000..17146d7
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/thread-uncond-bb.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -O3 -S | FileCheck %s
+
+define i32 @thread_uncond_bb_cmp(i1 %c, i32 %v) {
+; CHECK-LABEL: define i32 @thread_uncond_bb_cmp(
+; CHECK-SAME: i1 [[C:%.*]], i32 [[V:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C]], label [[DO_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: tail call void @dummy()
+; CHECK-NEXT: br label [[DO_END]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[V]], [[IF_THEN]] ]
+; CHECK-NEXT: ret i32 [[RETVAL]]
+;
+entry:
+ br i1 %c, label %do.end, label %if.then
+
+if.then: ; preds = %entry
+ call void @dummy()
+ %tobool = icmp eq i32 %v, 0
+ br i1 %tobool, label %do.end, label %return
+
+do.end: ; preds = %entry, %if.then
+ br label %return
+
+return: ; preds = %if.then, %do.end
+ %retval = phi i32 [ 0, %do.end ], [ %v, %if.then ]
+ ret i32 %retval
+}
+
+define i32 @thread_uncond_bb_cmp_zext(i1 %c, i32 %v) {
+; CHECK-LABEL: define i32 @thread_uncond_bb_cmp_zext(
+; CHECK-SAME: i1 [[C:%.*]], i32 [[V:%.*]]) local_unnamed_addr {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C]], label [[DO_END:%.*]], label [[IF_THEN:%.*]]
+; CHECK: if.then:
+; CHECK-NEXT: tail call void @dummy()
+; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[V]], 0
+; CHECK-NEXT: [[SPEC_SELECT:%.*]] = zext i1 [[TOBOOL]] to i32
+; CHECK-NEXT: br label [[DO_END]]
+; CHECK: return:
+; CHECK-NEXT: [[RETVAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[SPEC_SELECT]], [[IF_THEN]] ]
+; CHECK-NEXT: ret i32 [[RETVAL]]
+;
+entry:
+ br i1 %c, label %do.end, label %if.then
+
+if.then: ; preds = %entry
+ call void @dummy()
+ %tobool = icmp eq i32 %v, 0
+ br i1 %tobool, label %do.end, label %return
+
+do.end: ; preds = %entry, %if.then
+ br label %return
+
+return: ; preds = %if.then, %do.end
+ %retval = phi i32 [ 0, %do.end ], [ 1, %if.then ]
+ ret i32 %retval
+}
+
+declare void @dummy()
diff --git a/llvm/test/Transforms/SCCP/pr50901.ll b/llvm/test/Transforms/SCCP/pr50901.ll
index 11d6bba..d48d675 100644
--- a/llvm/test/Transforms/SCCP/pr50901.ll
+++ b/llvm/test/Transforms/SCCP/pr50901.ll
@@ -52,6 +52,16 @@
; CHECK: = !DIGlobalVariableExpression(var: ![[DBG_FLOAT_UNDEF:.+]], expr: !DIExpression())
; CHECK-DAG: ![[DBG_FLOAT_UNDEF]] = distinct !DIGlobalVariable(name: "g_float_undef"
+; CHECK: ![[G8:[0-9]+]] = !DIGlobalVariableExpression(var: ![[DBG8:[0-9]+]], expr: !DIExpression(DW_OP_constu, 22136, DW_OP_stack_value))
+; CHECK-DAG: ![[DBG8]] = distinct !DIGlobalVariable(name: "g_88", {{.*}}
+; CHECK: ![[G9:[0-9]+]] = !DIGlobalVariableExpression(var: ![[DBG9:[0-9]+]], expr: !DIExpression(DW_OP_constu, 23726, DW_OP_stack_value))
+; CHECK-DAG: ![[DBG9]] = distinct !DIGlobalVariable(name: "g_99", {{.*}}
+
+; CHECK-DAG: ![[DBGA:[0-9]+]] = distinct !DIGlobalVariable(name: "g_i32_undef"
+; CHECK-DAG: ![[GA:[0-9]+]] = !DIGlobalVariableExpression(var: ![[DBGA]], expr: !DIExpression())
+; CHECK-DAG: ![[DBGB:[0-9]+]] = distinct !DIGlobalVariable(name: "g_ptr_undef"
+; CHECK-DAG: ![[GB:[0-9]+]] = !DIGlobalVariableExpression(var: ![[DBGB]], expr: !DIExpression())
+
@g_1 = dso_local global i32 -4, align 4, !dbg !0
@g_2 = dso_local global float 0x4011C28F60000000, align 4, !dbg !8
@g_3 = dso_local global i8 97, align 1, !dbg !10
@@ -59,6 +69,8 @@
@g_5 = dso_local global i8 1, align 1, !dbg !16
@g_6 = dso_local global ptr null, align 8, !dbg !19
@g_7 = dso_local global ptr null, align 8, !dbg !23
+@g_8 = dso_local global half 0xH4321, align 4, !dbg !86
+@g_9 = dso_local global bfloat 0xR3F80, align 4, !dbg !90
@_ZL4g_11 = internal global i32 -5, align 4, !dbg !25
@_ZL4g_22 = internal global float 0x4016333340000000, align 4, !dbg !27
@_ZL4g_33 = internal global i8 98, align 1, !dbg !29
@@ -67,6 +79,10 @@
@_ZL4g_66 = internal global ptr null, align 8, !dbg !35
@_ZL4g_77 = internal global ptr inttoptr (i64 70 to ptr), align 8, !dbg !37
@g_float_undef = internal global float undef, align 4, !dbg !83
+@_ZL4g_88 = internal global half 0xH5678, align 4, !dbg !88
+@_ZL4g_99 = internal global bfloat 0xR5CAE, align 4, !dbg !92
+@g_i32_undef = internal global i32 undef, align 4, !dbg !95
+@g_ptr_undef = internal global ptr undef, align 8, !dbg !97
define dso_local void @_Z3barv() !dbg !46 {
entry:
@@ -88,6 +104,15 @@ entry:
store ptr %6, ptr @g_7, align 8, !dbg !59
%l = load float, ptr @g_float_undef, align 8, !dbg !59
store float %l, ptr @g_2, align 8, !dbg !59
+ %7 = load half, ptr @_ZL4g_88, align 4, !dbg !59
+ store half %7, ptr @g_8, align 4, !dbg !59
+ %8 = load bfloat, ptr @_ZL4g_99, align 4, !dbg !59
+ store bfloat %8, ptr @g_9, align 4, !dbg !59
+ %9 = load i32, ptr @g_i32_undef, align 4, !dbg !59
+ store i32 %9, ptr @g_1, align 4, !dbg !59
+ %10 = load ptr, ptr @g_ptr_undef, align 8, !dbg !59
+ store ptr %10, ptr @g_6, align 8, !dbg !59
+
ret void, !dbg !59
}
@@ -108,7 +133,7 @@ entry:
!4 = !{!5}
!5 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !6, size: 64)
!6 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float)
-!7 = !{!0, !8, !10, !13, !16, !19, !23, !25, !27, !29, !31, !33, !35, !37, !83}
+!7 = !{!0, !8, !10, !13, !16, !19, !23, !25, !27, !29, !31, !33, !35, !37, !83, !86, !88, !90, !92, !95, !97}
!8 = !DIGlobalVariableExpression(var: !9, expr: !DIExpression())
!9 = distinct !DIGlobalVariable(name: "g_2", scope: !2, file: !3, line: 2, type: !6, isLocal: false, isDefinition: true)
!10 = !DIGlobalVariableExpression(var: !11, expr: !DIExpression())
@@ -159,3 +184,17 @@ entry:
!82 = !DILocation(line: 31, column: 1, scope: !77)
!83 = !DIGlobalVariableExpression(var: !84, expr: !DIExpression())
!84 = distinct !DIGlobalVariable(name: "g_float_undef", linkageName: "g_float_undef", scope: !2, file: !3, line: 15, type: !6, isLocal: true, isDefinition: true)
+!85 = !DIBasicType(name: "float", size: 16, encoding: DW_ATE_float)
+!86 = !DIGlobalVariableExpression(var: !87, expr: !DIExpression())
+!87 = distinct !DIGlobalVariable(name: "g_8", scope: !2, file: !3, line: 2, type: !85, isLocal: false, isDefinition: true)
+!88 = !DIGlobalVariableExpression(var: !89, expr: !DIExpression())
+!89 = distinct !DIGlobalVariable(name: "g_88", linkageName: "_ZL4g_88", scope: !2, file: !3, line: 10, type: !85, isLocal: true, isDefinition: true)
+!90 = !DIGlobalVariableExpression(var: !91, expr: !DIExpression())
+!91 = distinct !DIGlobalVariable(name: "g_9", scope: !2, file: !3, line: 2, type: !85, isLocal: false, isDefinition: true)
+!92 = !DIGlobalVariableExpression(var: !93, expr: !DIExpression())
+!93 = distinct !DIGlobalVariable(name: "g_99", linkageName: "_ZL4g_99", scope: !2, file: !3, line: 10, type: !85, isLocal: true, isDefinition: true)
+
+!95 = !DIGlobalVariableExpression(var: !96, expr: !DIExpression())
+!96 = distinct !DIGlobalVariable(name: "g_i32_undef", linkageName: "g_i32_undef", scope: !2, file: !3, line: 9, type: !22, isLocal: true, isDefinition: true)
+!97 = !DIGlobalVariableExpression(var: !98, expr: !DIExpression())
+!98 = distinct !DIGlobalVariable(name: "g_ptr_undef", linkageName: "g_ptr_undef", scope: !2, file: !3, line: 14, type: !21, isLocal: true, isDefinition: true)
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/trunc-store-value-ty-not-power-of-2.ll b/llvm/test/Transforms/SLPVectorizer/X86/trunc-store-value-ty-not-power-of-2.ll
index 81b4ee4..2f0fad7 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/trunc-store-value-ty-not-power-of-2.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/trunc-store-value-ty-not-power-of-2.ll
@@ -107,3 +107,36 @@ define void @test_4_trunc_i24_to_i16(i24 %x, ptr %A) {
store i16 %t, ptr %gep.3, align 1
ret void
}
+
+%struct.d = type { [3 x i8], [3 x i8], [2 x i8] }
+
+; Test case for https://github.com/llvm/llvm-project/issues/88640.
+define void @test_access_i24_directly(ptr %src, ptr noalias %dst) "target-cpu"="btver2" {
+; CHECK-LABEL: define void @test_access_i24_directly(
+; CHECK-SAME: ptr [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[SRC]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i24
+; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds [[STRUCT_D:%.*]], ptr [[SRC]], i64 0, i32 1
+; CHECK-NEXT: [[BF_LOAD:%.*]] = load i24, ptr [[GEP_SRC]], align 1
+; CHECK-NEXT: [[BF_VALUE:%.*]] = and i24 [[TMP1]], 8388607
+; CHECK-NEXT: [[BF_CLEAR:%.*]] = and i24 [[BF_LOAD]], -8388608
+; CHECK-NEXT: [[BF_SET:%.*]] = or disjoint i24 [[BF_CLEAR]], [[BF_VALUE]]
+; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds [[STRUCT_D]], ptr [[DST]], i64 0, i32 1
+; CHECK-NEXT: store i24 [[BF_SET]], ptr [[GEP_DST]], align 1
+; CHECK-NEXT: store i24 0, ptr [[DST]], align 8
+; CHECK-NEXT: ret void
+;
+entry:
+ %0 = load i64, ptr %src, align 8
+ %1 = trunc i64 %0 to i24
+ %gep.src = getelementptr inbounds %struct.d, ptr %src, i64 0, i32 1
+ %bf.load = load i24, ptr %gep.src, align 1
+ %bf.value = and i24 %1, 8388607
+ %bf.clear = and i24 %bf.load, -8388608
+ %bf.set = or disjoint i24 %bf.clear, %bf.value
+ %gep.dst = getelementptr inbounds %struct.d, ptr %dst, i64 0, i32 1
+ store i24 %bf.set, ptr %gep.dst, align 1
+ store i24 0, ptr %dst, align 8
+ ret void
+}
diff --git a/llvm/test/Verifier/variadic.ll b/llvm/test/Verifier/variadic.ll
new file mode 100644
index 0000000..55e4a4d
--- /dev/null
+++ b/llvm/test/Verifier/variadic.ll
@@ -0,0 +1,8 @@
+; RUN: not opt -S -passes=verify 2>&1 < %s | FileCheck %s
+
+; CHECK: va_start called in a non-varargs function
+declare void @llvm.va_start(ptr)
+define void @not_vararg(ptr %p) nounwind {
+ call void @llvm.va_start(ptr %p)
+ ret void
+}
diff --git a/llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp b/llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp
index 0a947f6..4699fbb 100644
--- a/llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp
+++ b/llvm/tools/llvm-exegesis/lib/SubprocessMemory.cpp
@@ -22,7 +22,7 @@
namespace llvm {
namespace exegesis {
-#if defined(__linux__) && !defined(__ANDROID__)
+#if defined(__linux__)
long SubprocessMemory::getCurrentTID() {
// We're using the raw syscall here rather than the gettid() function provided
@@ -31,6 +31,8 @@ long SubprocessMemory::getCurrentTID() {
return syscall(SYS_gettid);
}
+#if !defined(__ANDROID__)
+
Error SubprocessMemory::initializeSubprocessMemory(pid_t ProcessID) {
// Add the PID to the shared memory name so that if we're running multiple
// processes at the same time, they won't interfere with each other.
@@ -157,7 +159,8 @@ Expected<int> SubprocessMemory::setupAuxiliaryMemoryInSubprocess(
SubprocessMemory::~SubprocessMemory() {}
-#endif // defined(__linux__) && !defined(__ANDROID__)
+#endif // !defined(__ANDROID__)
+#endif // defined(__linux__)
} // namespace exegesis
} // namespace llvm
diff --git a/llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp b/llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
index 4865616..d7e4dba 100644
--- a/llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
+++ b/llvm/unittests/Bitcode/DataLayoutUpgradeTest.cpp
@@ -59,6 +59,14 @@ TEST(DataLayoutUpgradeTest, ValidDataLayoutUpgrade) {
EXPECT_EQ(UpgradeDataLayoutString("e-m:e-p:64:64-i64:64-i128:128-n64-S128",
"riscv64"),
"e-m:e-p:64:64-i64:64-i128:128-n32:64-S128");
+
+ // Check that SPIR & SPIRV targets add -G1 if it's not present.
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32", "spir"), "e-p:32:32-G1");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32", "spir64"), "e-p:32:32-G1");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32", "spirv32"), "e-p:32:32-G1");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32", "spirv64"), "e-p:32:32-G1");
+ // but that SPIRV Logical does not.
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32", "spirv"), "e-p:32:32");
}
TEST(DataLayoutUpgradeTest, NoDataLayoutUpgrade) {
@@ -100,6 +108,17 @@ TEST(DataLayoutUpgradeTest, NoDataLayoutUpgrade) {
"p7:64:64-G2-e-p:64:64-ni:7:8:9-p8:128:128-p9:192:256:256:32");
EXPECT_EQ(UpgradeDataLayoutString("e-p:64:64-p7:64:64-G1", "amdgcn"),
"e-p:64:64-p7:64:64-G1-ni:7:8:9-p8:128:128-p9:192:256:256:32");
+
+ // Check that SPIR & SPIRV targets don't add -G1 if there is already a -G
+ // flag.
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G2", "spir"), "e-p:32:32-G2");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G2", "spir64"), "e-p:32:32-G2");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G2", "spirv32"), "e-p:32:32-G2");
+ EXPECT_EQ(UpgradeDataLayoutString("e-p:32:32-G2", "spirv64"), "e-p:32:32-G2");
+ EXPECT_EQ(UpgradeDataLayoutString("G2", "spir"), "G2");
+ EXPECT_EQ(UpgradeDataLayoutString("G2", "spir64"), "G2");
+ EXPECT_EQ(UpgradeDataLayoutString("G2", "spirv32"), "G2");
+ EXPECT_EQ(UpgradeDataLayoutString("G2", "spirv64"), "G2");
}
TEST(DataLayoutUpgradeTest, EmptyDataLayout) {
@@ -113,6 +132,14 @@ TEST(DataLayoutUpgradeTest, EmptyDataLayout) {
EXPECT_EQ(UpgradeDataLayoutString("", "r600"), "G1");
EXPECT_EQ(UpgradeDataLayoutString("", "amdgcn"),
"G1-ni:7:8:9-p7:160:256:256:32-p8:128:128-p9:192:256:256:32");
+
+ // Check that SPIR & SPIRV targets add G1 if it's not present.
+ EXPECT_EQ(UpgradeDataLayoutString("", "spir"), "G1");
+ EXPECT_EQ(UpgradeDataLayoutString("", "spir64"), "G1");
+ EXPECT_EQ(UpgradeDataLayoutString("", "spirv32"), "G1");
+ EXPECT_EQ(UpgradeDataLayoutString("", "spirv64"), "G1");
+ // but SPIRV Logical does not.
+ EXPECT_EQ(UpgradeDataLayoutString("", "spirv"), "");
}
} // end namespace
diff --git a/llvm/unittests/Transforms/Utils/LocalTest.cpp b/llvm/unittests/Transforms/Utils/LocalTest.cpp
index a86775a..d7d0ea2 100644
--- a/llvm/unittests/Transforms/Utils/LocalTest.cpp
+++ b/llvm/unittests/Transforms/Utils/LocalTest.cpp
@@ -1241,6 +1241,18 @@ TEST(Local, ExpressionForConstant) {
EXPECT_NE(Expr, nullptr);
EXPECT_EQ(Expr->getElement(1), 13841306799765140275U);
+ // Half.
+ Type *HalfTy = Type::getHalfTy(Context);
+ Expr = createExpression(ConstantFP::get(HalfTy, 5.55), HalfTy);
+ EXPECT_NE(Expr, nullptr);
+ EXPECT_EQ(Expr->getElement(1), 17805U);
+
+ // BFloat.
+ Type *BFloatTy = Type::getBFloatTy(Context);
+ Expr = createExpression(ConstantFP::get(BFloatTy, -5.55), BFloatTy);
+ EXPECT_NE(Expr, nullptr);
+ EXPECT_EQ(Expr->getElement(1), 49330U);
+
// Pointer.
PointerType *PtrTy = PointerType::get(Context, 0);
Expr = createExpression(ConstantPointerNull::get(PtrTy), PtrTy);
@@ -1257,15 +1269,6 @@ TEST(Local, ExpressionForConstant) {
EXPECT_NE(Expr, nullptr);
EXPECT_EQ(Expr->getElement(1), 5678U);
- // Others.
- Type *HalfTy = Type::getHalfTy(Context);
- Expr = createExpression(ConstantFP::get(HalfTy, 32), HalfTy);
- EXPECT_EQ(Expr, nullptr);
-
- Type *BFloatTy = Type::getBFloatTy(Context);
- Expr = createExpression(ConstantFP::get(BFloatTy, 32), BFloatTy);
- EXPECT_EQ(Expr, nullptr);
-
Type *FP128Ty = Type::getFP128Ty(Context);
Expr = createExpression(ConstantFP::get(FP128Ty, 32), FP128Ty);
EXPECT_EQ(Expr, nullptr);
diff --git a/llvm/utils/gn/secondary/lldb/test/BUILD.gn b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
index 414ea49..c824573 100644
--- a/llvm/utils/gn/secondary/lldb/test/BUILD.gn
+++ b/llvm/utils/gn/secondary/lldb/test/BUILD.gn
@@ -118,6 +118,7 @@ write_lit_cfg("lit_shell_site_cfg") {
"LLDB_TOOLS_DIR=" + rebase_path("$root_out_dir/bin"),
"LLDB_USE_SYSTEM_DEBUGSERVER=1", # XXX port //lldb/tools/debugserver (?)
"LLVM_HOST_TRIPLE=$llvm_current_triple",
+ "LLVM_USE_SANITIZER=",
]
if (llvm_enable_zlib) {