Diffstat (limited to 'llvm')
-rw-r--r--  llvm/benchmarks/FormatVariadicBM.cpp | 1
-rw-r--r--  llvm/docs/RISCVUsage.rst | 3
-rw-r--r--  llvm/docs/SandboxIR.md | 4
-rw-r--r--  llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h | 47
-rw-r--r--  llvm/include/llvm/Analysis/TargetLibraryInfo.h | 20
-rw-r--r--  llvm/include/llvm/Bitcode/BitcodeWriter.h | 1
-rw-r--r--  llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h | 16
-rw-r--r--  llvm/include/llvm/CodeGen/BasicTTIImpl.h | 30
-rw-r--r--  llvm/include/llvm/CodeGen/MachineBasicBlock.h | 2
-rw-r--r--  llvm/include/llvm/CodeGen/SelectionDAG.h | 14
-rw-r--r--  llvm/include/llvm/CodeGen/TargetInstrInfo.h | 11
-rw-r--r--  llvm/include/llvm/CodeGen/TargetSubtargetInfo.h | 4
-rw-r--r--  llvm/include/llvm/DebugInfo/GSYM/GsymContext.h | 1
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h | 1
-rw-r--r--  llvm/include/llvm/Frontend/OpenMP/ClauseT.h | 1
-rw-r--r--  llvm/include/llvm/Frontend/OpenMP/OMPConstants.h | 10
-rw-r--r--  llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h | 25
-rw-r--r--  llvm/include/llvm/IR/DebugInfo.h | 4
-rw-r--r--  llvm/include/llvm/IR/DebugInfoMetadata.h | 33
-rw-r--r--  llvm/include/llvm/IR/DiagnosticInfo.h | 1
-rw-r--r--  llvm/include/llvm/IR/Dominators.h | 1
-rw-r--r--  llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h | 3
-rw-r--r--  llvm/include/llvm/IR/ProfileSummary.h | 1
-rw-r--r--  llvm/include/llvm/IR/RuntimeLibcalls.h | 15
-rw-r--r--  llvm/include/llvm/IR/RuntimeLibcalls.td | 58
-rw-r--r--  llvm/include/llvm/IR/SystemLibraries.h | 39
-rw-r--r--  llvm/include/llvm/MC/DXContainerPSVInfo.h | 1
-rw-r--r--  llvm/include/llvm/MC/MCAsmInfo.h | 2
-rw-r--r--  llvm/include/llvm/MC/MCAssembler.h | 2
-rw-r--r--  llvm/include/llvm/Option/Arg.h | 2
-rw-r--r--  llvm/include/llvm/Passes/PassBuilder.h | 2
-rw-r--r--  llvm/include/llvm/Remarks/YAMLRemarkSerializer.h | 1
-rw-r--r--  llvm/include/llvm/Support/FormatVariadic.h | 1
-rw-r--r--  llvm/include/llvm/Support/Jobserver.h | 1
-rw-r--r--  llvm/include/llvm/Support/SpecialCaseList.h | 47
-rw-r--r--  llvm/include/llvm/Support/TypeSize.h | 1
-rw-r--r--  llvm/include/llvm/Support/UnicodeCharRanges.h | 1
-rw-r--r--  llvm/include/llvm/TableGen/DirectiveEmitter.h | 1
-rw-r--r--  llvm/include/llvm/Transforms/Scalar/DropUnnecessaryAssumes.h | 6
-rw-r--r--  llvm/include/llvm/Transforms/Scalar/JumpThreading.h | 1
-rw-r--r--  llvm/include/llvm/Transforms/Scalar/Scalarizer.h | 1
-rw-r--r--  llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h | 1
-rw-r--r--  llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h | 1
-rw-r--r--  llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h | 1
-rw-r--r--  llvm/include/llvm/XRay/FDRRecordConsumer.h | 1
-rw-r--r--  llvm/lib/Analysis/CMakeLists.txt | 1
-rw-r--r--  llvm/lib/Analysis/DXILResource.cpp | 1
-rw-r--r--  llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp | 106
-rw-r--r--  llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp | 281
-rw-r--r--  llvm/lib/Analysis/TFLiteUtils.cpp | 1
-rw-r--r--  llvm/lib/Analysis/TargetLibraryInfo.cpp | 42
-rw-r--r--  llvm/lib/Analysis/TrainingLogger.cpp | 1
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 12
-rw-r--r--  llvm/lib/CodeGen/BasicBlockSections.cpp | 23
-rw-r--r--  llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp | 16
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp | 17
-rw-r--r--  llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp | 106
-rw-r--r--  llvm/lib/CodeGen/RegAllocGreedy.h | 1
-rw-r--r--  llvm/lib/CodeGen/RegisterCoalescer.cpp | 12
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 20
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 14
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 3
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 26
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 100
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 17
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/TargetInstrInfo.cpp | 68
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringBase.cpp | 18
-rw-r--r--  llvm/lib/CodeGen/TargetSchedule.cpp | 1
-rw-r--r--  llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp | 1
-rw-r--r--  llvm/lib/Frontend/Driver/CodeGenOptions.cpp | 17
-rw-r--r--  llvm/lib/Frontend/Offloading/OffloadWrapper.cpp | 1
-rw-r--r--  llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 43
-rw-r--r--  llvm/lib/IR/CMakeLists.txt | 1
-rw-r--r--  llvm/lib/IR/DebugInfo.cpp | 14
-rw-r--r--  llvm/lib/IR/DebugInfoMetadata.cpp | 13
-rw-r--r--  llvm/lib/IR/RuntimeLibcalls.cpp | 123
-rw-r--r--  llvm/lib/IR/SystemLibraries.cpp | 34
-rw-r--r--  llvm/lib/IR/Verifier.cpp | 20
-rw-r--r--  llvm/lib/Passes/PassBuilder.cpp | 6
-rw-r--r--  llvm/lib/Passes/PassBuilderPipelines.cpp | 14
-rw-r--r--  llvm/lib/Passes/PassRegistry.def | 8
-rw-r--r--  llvm/lib/Support/Mustache.cpp | 1
-rw-r--r--  llvm/lib/Support/SpecialCaseList.cpp | 91
-rw-r--r--  llvm/lib/Support/Unix/Unix.h | 1
-rw-r--r--  llvm/lib/Support/Windows/Program.inc | 1
-rw-r--r--  llvm/lib/Support/Windows/Signals.inc | 1
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 33
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 71
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.td | 5
-rw-r--r--  llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp | 86
-rw-r--r--  llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td | 12
-rw-r--r--  llvm/lib/Target/AArch64/AArch64Subtarget.h | 2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetMachine.cpp | 1
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 12
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp | 7
-rw-r--r--  llvm/lib/Target/AMDGPU/GCNSubtarget.h | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600InstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/R600Subtarget.h | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 3
-rw-r--r--  llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp | 66
-rw-r--r--  llvm/lib/Target/AMDGPU/VOP1Instructions.td | 39
-rw-r--r--  llvm/lib/Target/AMDGPU/VOPInstructions.td | 8
-rw-r--r--  llvm/lib/Target/ARC/ARCInstrInfo.cpp | 3
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/ARM/ARMBaseInstrInfo.h | 9
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrInfo.cpp | 3
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrInfo.h | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMSubtarget.h | 1
-rw-r--r--  llvm/lib/Target/ARM/Thumb1InstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/Thumb1InstrInfo.h | 2
-rw-r--r--  llvm/lib/Target/ARM/Thumb2InstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/Thumb2InstrInfo.h | 2
-rw-r--r--  llvm/lib/Target/AVR/AVRInstrInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/AVR/AVRTargetTransformInfo.h | 1
-rw-r--r--  llvm/lib/Target/BPF/BPFInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp | 1
-rw-r--r--  llvm/lib/Target/CSKY/CSKYInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/DirectX/DXContainerGlobals.cpp | 1
-rw-r--r--  llvm/lib/Target/DirectX/DXILResourceAccess.cpp | 156
-rw-r--r--  llvm/lib/Target/DirectX/DXILRootSignature.h | 1
-rw-r--r--  llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h | 1
-rw-r--r--  llvm/lib/Target/DirectX/DirectXInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonInstrInfo.h | 5
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.cpp | 3
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonSubtarget.h | 3
-rw-r--r--  llvm/lib/Target/Lanai/LanaiInstrInfo.cpp | 3
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchInstrInfo.h | 4
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp | 2
-rw-r--r--  llvm/lib/Target/LoongArch/LoongArchSubtarget.h | 3
-rw-r--r--  llvm/lib/Target/M68k/M68kSubtarget.h | 2
-rw-r--r--  llvm/lib/Target/MSP430/MSP430InstrInfo.cpp | 3
-rw-r--r--  llvm/lib/Target/Mips/Mips16InstrInfo.cpp | 6
-rw-r--r--  llvm/lib/Target/Mips/Mips16InstrInfo.h | 2
-rw-r--r--  llvm/lib/Target/Mips/MipsInstrInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/Mips/MipsInstrInfo.h | 8
-rw-r--r--  llvm/lib/Target/Mips/MipsSEInstrInfo.cpp | 6
-rw-r--r--  llvm/lib/Target/Mips/MipsSEInstrInfo.h | 2
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCSubtarget.h | 1
-rw-r--r--  llvm/lib/Target/RISCV/RISCVFeatures.td | 3
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 5
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfo.h | 3
-rw-r--r--  llvm/lib/Target/RISCV/RISCVSubtarget.cpp | 2
-rw-r--r--  llvm/lib/Target/RISCV/RISCVSubtarget.h | 3
-rw-r--r--  llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp | 3
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp | 23
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp | 17
-rw-r--r--  llvm/lib/Target/Sparc/SparcInstrInfo.cpp | 4
-rw-r--r--  llvm/lib/Target/Sparc/SparcSubtarget.h | 1
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/VE/VEInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp | 66
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h | 1
-rw-r--r--  llvm/lib/Target/X86/X86FlagsCopyLowering.cpp | 1
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 1
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86Subtarget.h | 2
-rw-r--r--  llvm/lib/Target/X86/X86TargetMachine.cpp | 1
-rw-r--r--  llvm/lib/Target/XCore/XCoreInstrInfo.cpp | 2
-rw-r--r--  llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp | 3
-rw-r--r--  llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp | 7
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 44
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.h | 5
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanHelpers.h | 19
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 56
-rw-r--r--  llvm/test/Analysis/CostModel/RISCV/cast.ll | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll | 7
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir | 55
-rw-r--r--  llvm/test/CodeGen/AArch64/build-vector-two-dup.ll | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll | 22
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll | 9
-rw-r--r--  llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll | 7
-rw-r--r--  llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll | 332
-rw-r--r--  llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll | 13
-rw-r--r--  llvm/test/CodeGen/AArch64/llvm.sincospi.ll | 308
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll | 39
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir | 133
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/sbc.ll | 392
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll | 85
-rw-r--r--  llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-extract-element.ll | 132
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll | 39
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll | 15
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll | 80
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll | 40
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll | 30
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll | 12
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll | 72
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll | 8
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll | 75
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll | 15
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll | 80
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll | 40
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll | 20
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll | 60
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll | 15
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/sve2-xar.ll | 5
-rw-r--r--  llvm/test/CodeGen/AArch64/zext-to-tbl.ll | 92
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll | 38
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll | 420
-rw-r--r--  llvm/test/CodeGen/AMDGPU/and.ll | 100
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16-math.ll | 5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bfe-patterns.ll | 84
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bfi_nested.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bfm.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bitreverse.ll | 42
-rw-r--r--  llvm/test/CodeGen/AMDGPU/build_vector.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/cc-entry.ll | 69
-rw-r--r--  llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll | 32
-rw-r--r--  llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll | 46
-rw-r--r--  llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll | 113
-rw-r--r--  llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll | 198
-rw-r--r--  llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll | 56
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ds-read2-write2-debug-info.ll | 89
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fabs.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fdiv.ll | 84
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmin_legacy.ll | 15
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fnearbyint.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-fabs.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg.ll | 17
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fp_to_sint.ll | 85
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fp_to_uint.ll | 48
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fshl.ll | 19
-rw-r--r--  llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll | 42
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll | 30
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp.ll | 31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp10.ll | 31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp2.ll | 31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log.ll | 37
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log10.ll | 37
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log2.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 2050
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll | 2106
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll | 2106
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll | 2398
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad_uint24.ll | 42
-rw-r--r--  llvm/test/CodeGen/AMDGPU/max.ll | 86
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memmove-var-size.ll | 408
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mul_int24.ll | 129
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll | 106
-rw-r--r--  llvm/test/CodeGen/AMDGPU/nofpclass-call.ll | 191
-rw-r--r--  llvm/test/CodeGen/AMDGPU/or.ll | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll | 15
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shl.v2i16.ll | 66
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sign_extend.ll | 90
-rw-r--r--  llvm/test/CodeGen/AMDGPU/skip-if-dead.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll | 63
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sub.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sub.v2i16.ll | 41
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv.ll | 62
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv64.ll | 28
-rw-r--r--  llvm/test/CodeGen/AMDGPU/while-break.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/xor.ll | 13
-rw-r--r--  llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll | 15
-rw-r--r--  llvm/test/CodeGen/ARM/llvm.sincospi.ll | 249
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-struct.ll | 59
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-vector.ll | 49
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-typedgep.ll | 30
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-arrays.ll | 145
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic-struct.ll | 64
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic.ll | 46
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-scalars.ll | 101
-rw-r--r--  llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-vectors.ll | 121
-rw-r--r--  llvm/test/CodeGen/Hexagon/instrprof-custom.ll | 7
-rw-r--r--  llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll | 48
-rw-r--r--  llvm/test/CodeGen/PowerPC/llvm.sincos.ll | 72
-rw-r--r--  llvm/test/CodeGen/PowerPC/llvm.sincospi.ll | 21
-rw-r--r--  llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll | 25
-rw-r--r--  llvm/test/CodeGen/PowerPC/milicode32.ll | 56
-rw-r--r--  llvm/test/CodeGen/PowerPC/milicode64.ll | 79
-rw-r--r--  llvm/test/CodeGen/RISCV/attributes.ll | 4
-rw-r--r--  llvm/test/CodeGen/RISCV/features-info.ll | 1
-rw-r--r--  llvm/test/CodeGen/RISCV/overflow-intrinsics.ll | 48
-rw-r--r--  llvm/test/CodeGen/SPIRV/non_int_constant_null.ll | 25
-rw-r--r--  llvm/test/CodeGen/SPIRV/opencl/unpackhalf2x16-error.ll | 11
-rw-r--r--  llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll | 4
-rw-r--r--  llvm/test/CodeGen/SPIRV/zero-length-array.ll | 13
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll | 18
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll | 24
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll | 34
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll | 91
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-float32regloops.ll | 211
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-gather-increment.ll | 24
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll | 90
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll | 52
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-shuffle.ll | 13
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vld4.ll | 13
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll | 24
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vst4.ll | 14
-rw-r--r--  llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll | 2
-rw-r--r--  llvm/test/CodeGen/WebAssembly/memory-interleave.ll | 278
-rw-r--r--  llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll | 20
-rw-r--r--  llvm/test/CodeGen/X86/3addr-16bit.ll | 48
-rw-r--r--  llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir | 2
-rw-r--r--  llvm/test/CodeGen/X86/atomic-rm-bit-test.ll | 22
-rw-r--r--  llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll | 3
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-sections-list.ll | 62
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/bitcast-vector-bool.ll | 32
-rw-r--r--  llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/fold-loop-of-urem.ll | 81
-rw-r--r--  llvm/test/CodeGen/X86/freeze-binary.ll | 26
-rw-r--r--  llvm/test/CodeGen/X86/i128-mul.ll | 178
-rw-r--r--  llvm/test/CodeGen/X86/icmp-abs-C.ll | 22
-rw-r--r--  llvm/test/CodeGen/X86/ldexp-avx512.ll | 1288
-rw-r--r--  llvm/test/CodeGen/X86/llvm.sincospi.ll | 233
-rw-r--r--  llvm/test/CodeGen/X86/masked_gather_scatter.ll | 12
-rw-r--r--  llvm/test/CodeGen/X86/midpoint-int.ll | 28
-rw-r--r--  llvm/test/CodeGen/X86/mmx-arith.ll | 3
-rw-r--r--  llvm/test/CodeGen/X86/mul-constant-i16.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/mul-constant-i32.ll | 16
-rw-r--r--  llvm/test/CodeGen/X86/mul-constant-i8.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/optimize-max-0.ll | 211
-rw-r--r--  llvm/test/CodeGen/X86/parity.ll | 30
-rw-r--r--  llvm/test/CodeGen/X86/pr166534.ll | 68
-rw-r--r--  llvm/test/CodeGen/X86/pr166744.ll | 14
-rw-r--r--  llvm/test/CodeGen/X86/rotate-extract.ll | 4
-rw-r--r--  llvm/test/CodeGen/X86/smul_fix.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/sshl_sat.ll | 40
-rw-r--r--  llvm/test/CodeGen/X86/sshl_sat_vec.ll | 113
-rw-r--r--  llvm/test/CodeGen/X86/stackmap.ll | 9
-rw-r--r--  llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll | 210
-rw-r--r--  llvm/test/CodeGen/X86/twoaddr-lea.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/umul_fix.ll | 8
-rw-r--r--  llvm/test/CodeGen/X86/ushl_sat.ll | 28
-rw-r--r--  llvm/test/CodeGen/X86/ushl_sat_vec.ll | 111
-rw-r--r--  llvm/test/CodeGen/X86/vector-mulfix-legalize.ll | 34
-rw-r--r--  llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll | 160
-rw-r--r--  llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll | 6065
-rw-r--r--  llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll | 1344
-rw-r--r--  llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll | 328
-rw-r--r--  llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll | 95
-rw-r--r--  llvm/test/CodeGen/X86/x86-shrink-wrapping.ll | 18
-rw-r--r--  llvm/test/CodeGen/X86/xor.ll | 132
-rw-r--r--  llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir | 3
-rw-r--r--  llvm/test/DebugInfo/MIR/X86/machine-cse.mir | 5
-rw-r--r--  llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir | 10
-rw-r--r--  llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll | 12
-rw-r--r--  llvm/test/DebugInfo/X86/live-debug-values-constprop.mir | 9
-rw-r--r--  llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll | 2
-rw-r--r--  llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir | 2
-rw-r--r--  llvm/test/Linker/thinlto_funcimport_debug.ll | 4
-rw-r--r--  llvm/test/MC/AArch64/prfum.s | 44
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s | 80
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s | 72
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s | 72
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s | 96
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s | 96
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s | 96
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s | 96
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt | 96
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt | 96
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt | 96
-rw-r--r--  llvm/test/MC/RISCV/attribute-arch.s | 3
-rw-r--r--  llvm/test/Other/new-pm-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-lto-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll | 1
-rw-r--r--  llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll | 1
-rw-r--r--  llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll | 6
-rw-r--r--  llvm/test/Transforms/DropUnnecessaryAssumes/dereferenceable.ll | 54
-rw-r--r--  llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll | 2
-rw-r--r--  llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll | 2
-rw-r--r--  llvm/test/Transforms/InstCombine/debuginfo-dce.ll | 8
-rw-r--r--  llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll | 5
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll | 21
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll | 124
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll | 22
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll | 245
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll | 189
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll | 82
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll | 12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll | 67
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll | 33
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/debugloc.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll | 55
-rw-r--r--  llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll | 20
-rw-r--r--  llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll | 76
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pr50686.ll | 18
-rw-r--r--  llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll | 32
-rw-r--r--  llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll | 2
-rw-r--r--  llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll | 10
-rw-r--r--  llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll | 16
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll | 68
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll | 8
-rw-r--r--  llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll | 21
-rw-r--r--  llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll | 20
-rw-r--r--  llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll | 2
-rw-r--r--  llvm/test/tools/llc/save-stats.ll | 10
-rw-r--r--  llvm/tools/llvm-exegesis/lib/X86/Target.cpp | 1
-rw-r--r--  llvm/tools/llvm-gpu-loader/amdhsa.cpp | 1
-rw-r--r--  llvm/tools/llvm-mc/Disassembler.h | 2
-rw-r--r--  llvm/tools/llvm-stress/llvm-stress.cpp | 1
-rw-r--r--  llvm/tools/llvm-xray/trie-node.h | 1
-rw-r--r--  llvm/unittests/ADT/CombinationGeneratorTest.cpp | 1
-rw-r--r--  llvm/unittests/ADT/DAGDeltaAlgorithmTest.cpp | 1
-rw-r--r--  llvm/unittests/ADT/DeltaAlgorithmTest.cpp | 1
-rw-r--r--  llvm/unittests/ADT/STLForwardCompatTest.cpp | 1
-rw-r--r--  llvm/unittests/ADT/SequenceTest.cpp | 5
-rw-r--r--  llvm/unittests/Analysis/DomTreeUpdaterTest.cpp | 1
-rw-r--r--  llvm/unittests/CodeGen/MFCommon.inc | 4
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp | 1
-rw-r--r--  llvm/unittests/IR/VFABIDemanglerTest.cpp | 1
-rw-r--r--  llvm/unittests/Support/SpecialCaseListTest.cpp | 32
-rw-r--r--  llvm/unittests/TargetParser/RISCVISAInfoTest.cpp | 1
-rw-r--r--  llvm/unittests/TextAPI/TextStubHelpers.h | 1
-rw-r--r--  llvm/utils/KillTheDoctor/KillTheDoctor.cpp | 1
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenHwModes.h | 1
-rw-r--r--  llvm/utils/TableGen/Common/CodeGenTarget.h | 1
-rw-r--r--  llvm/utils/TableGen/InstrInfoEmitter.cpp | 9
-rw-r--r--  llvm/utils/TableGen/TableGenBackends.h | 2
-rw-r--r--  llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn | 2
-rw-r--r--  llvm/utils/gn/secondary/libcxx/include/BUILD.gn | 3
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn | 1
-rw-r--r--  llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn | 1
458 files changed, 18006 insertions, 14991 deletions
diff --git a/llvm/benchmarks/FormatVariadicBM.cpp b/llvm/benchmarks/FormatVariadicBM.cpp
index 3e851f0..b451d10 100644
--- a/llvm/benchmarks/FormatVariadicBM.cpp
+++ b/llvm/benchmarks/FormatVariadicBM.cpp
@@ -10,6 +10,7 @@
#include "llvm/Support/FormatVariadic.h"
#include <algorithm>
#include <string>
+#include <vector>
using namespace llvm;
using namespace std;
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index d03f383..a21f03d 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -351,6 +351,9 @@ The primary goal of experimental support is to assist in the process of ratifica
``experimental-zvqdotq``
LLVM implements the `0.0.1 draft specification <https://github.com/riscv/riscv-dot-product/releases/tag/v0.0.1>`__.
+``experimental-smpmpmt``
+ LLVM implements the `0.6 draft specification <https://github.com/riscv/riscv-isa-manual/blob/smpmpmt/src/smpmpmt.adoc>`__.
+
To use an experimental extension from `clang`, you must add `-menable-experimental-extensions` to the command line, and specify the exact version of the experimental extension you are using. To use an experimental extension with LLVM's internal developer tools (e.g. `llc`, `llvm-objdump`, `llvm-mc`), you must prefix the extension name with `experimental-`. Note that you don't need to specify the version with internal tools, and shouldn't include the `experimental-` prefix with `clang`.
Vendor Extensions
diff --git a/llvm/docs/SandboxIR.md b/llvm/docs/SandboxIR.md
index d2b612b..dbf488b 100644
--- a/llvm/docs/SandboxIR.md
+++ b/llvm/docs/SandboxIR.md
@@ -8,8 +8,8 @@ Within your LLVM pass:
``` C++
// 1. Include the necessary Sandbox IR header files.
-#include "llvm/SandboxIR/Context.h
-#include "llvm/SandboxIR/Function.h
+#include "llvm/SandboxIR/Context.h"
+#include "llvm/SandboxIR/Function.h"
// 2. Create a sandboxir::Context using LLVMContext `LLVMCtx`.
sandboxir::Context Ctx(LLVMCtx);
diff --git a/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h b/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h
deleted file mode 100644
index b44edd3..0000000
--- a/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h
+++ /dev/null
@@ -1,47 +0,0 @@
-//===- InlineSizeEstimatorAnalysis.h - ML size estimator --------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-
-#ifndef LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H
-#define LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H
-
-#include "llvm/IR/PassManager.h"
-
-namespace llvm {
-class Function;
-
-class TFModelEvaluator;
-class InlineSizeEstimatorAnalysis
- : public AnalysisInfoMixin<InlineSizeEstimatorAnalysis> {
-public:
- InlineSizeEstimatorAnalysis();
- InlineSizeEstimatorAnalysis(InlineSizeEstimatorAnalysis &&);
- ~InlineSizeEstimatorAnalysis();
-
- static AnalysisKey Key;
- using Result = std::optional<size_t>;
- Result run(const Function &F, FunctionAnalysisManager &FAM);
- static bool isEvaluatorRequested();
-
-private:
- std::unique_ptr<TFModelEvaluator> Evaluator;
-};
-
-class InlineSizeEstimatorAnalysisPrinterPass
- : public PassInfoMixin<InlineSizeEstimatorAnalysisPrinterPass> {
- raw_ostream &OS;
-
-public:
- explicit InlineSizeEstimatorAnalysisPrinterPass(raw_ostream &OS) : OS(OS) {}
-
- PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
-
- static bool isRequired() { return true; }
-};
-} // namespace llvm
-#endif // LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H
diff --git a/llvm/include/llvm/Analysis/TargetLibraryInfo.h b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
index 3f39b47..7895443 100644
--- a/llvm/include/llvm/Analysis/TargetLibraryInfo.h
+++ b/llvm/include/llvm/Analysis/TargetLibraryInfo.h
@@ -23,6 +23,7 @@
namespace llvm {
template <typename T> class ArrayRef;
+enum class VectorLibrary;
/// Provides info so a possible vectorization of a function can be
/// computed. Function 'VectorFnName' is equivalent to 'ScalarFnName'
@@ -117,25 +118,6 @@ class TargetLibraryInfoImpl {
const Module &M) const;
public:
- /// List of known vector-functions libraries.
- ///
- /// The vector-functions library defines, which functions are vectorizable
- /// and with which factor. The library can be specified by either frontend,
- /// or a commandline option, and then used by
- /// addVectorizableFunctionsFromVecLib for filling up the tables of
- /// vectorizable functions.
- enum VectorLibrary {
- NoLibrary, // Don't use any vector library.
- Accelerate, // Use Accelerate framework.
- DarwinLibSystemM, // Use Darwin's libsystem_m.
- LIBMVEC, // GLIBC Vector Math library.
- MASSV, // IBM MASS vector library.
- SVML, // Intel short vector math library.
- SLEEFGNUABI, // SLEEF - SIMD Library for Evaluating Elementary Functions.
- ArmPL, // Arm Performance Libraries.
- AMDLIBM // AMD Math Vector library.
- };
-
TargetLibraryInfoImpl() = delete;
LLVM_ABI explicit TargetLibraryInfoImpl(const Triple &T);
diff --git a/llvm/include/llvm/Bitcode/BitcodeWriter.h b/llvm/include/llvm/Bitcode/BitcodeWriter.h
index 1e72e84..d88e261 100644
--- a/llvm/include/llvm/Bitcode/BitcodeWriter.h
+++ b/llvm/include/llvm/Bitcode/BitcodeWriter.h
@@ -20,7 +20,6 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <memory>
-#include <string>
#include <vector>
namespace llvm {
diff --git a/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h b/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h
index 7b1a5f5..ee1f283 100644
--- a/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h
+++ b/llvm/include/llvm/CodeGen/BasicBlockSectionsProfileReader.h
@@ -68,17 +68,13 @@ public:
BasicBlockSectionsProfileReader() = default;
- // Returns true if basic block sections profile exist for function \p
- // FuncName.
+ // Returns true if function \p FuncName is hot based on the basic block
+ // section profile.
bool isFunctionHot(StringRef FuncName) const;
- // Returns a pair with first element representing whether basic block sections
- // profile exist for the function \p FuncName, and the second element
- // representing the basic block sections profile (cluster info) for this
- // function. If the first element is true and the second element is empty, it
- // means unique basic block sections are desired for all basic blocks of the
- // function.
- std::pair<bool, SmallVector<BBClusterInfo>>
+ // Returns the cluster info for the function \p FuncName. Returns an empty
+ // vector if the function has no cluster info.
+ SmallVector<BBClusterInfo>
getClusterInfoForFunction(StringRef FuncName) const;
// Returns the path clonings for the given function.
@@ -190,7 +186,7 @@ public:
bool isFunctionHot(StringRef FuncName) const;
- std::pair<bool, SmallVector<BBClusterInfo>>
+ SmallVector<BBClusterInfo>
getClusterInfoForFunction(StringRef FuncName) const;
SmallVector<SmallVector<unsigned>>
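With the simplified return type, callers no longer unpack a pair; a minimal migration sketch (the Reader and MF variables are assumed, not part of this patch):

    // Before: auto [HasProfile, Clusters] = ...; after the change, an empty
    // vector means there is no cluster info for this function.
    SmallVector<BBClusterInfo> Clusters =
        Reader.getClusterInfoForFunction(MF.getName());
    if (Clusters.empty())
      return; // No basic block sections profile for this function.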
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index f585257..1c167af 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -316,12 +316,22 @@ private:
EVT ScalarVT = VT.getScalarType();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
+ /// Migration flag. IsVectorCall cases directly know about the vector
+ /// libcall in RuntimeLibcallsInfo and shouldn't try to use
+ /// LibInfo->getVectorMappingInfo.
+ bool IsVectorCall = false;
+
switch (ICA.getID()) {
case Intrinsic::modf:
LC = RTLIB::getMODF(ScalarVT);
break;
case Intrinsic::sincospi:
- LC = RTLIB::getSINCOSPI(ScalarVT);
+ LC = RTLIB::getSINCOSPI(VT);
+ if (LC == RTLIB::UNKNOWN_LIBCALL)
+ LC = RTLIB::getSINCOSPI(ScalarVT);
+ else if (VT.isVector())
+ IsVectorCall = true;
+
break;
case Intrinsic::sincos:
LC = RTLIB::getSINCOS(ScalarVT);
@@ -345,17 +355,23 @@ private:
LLVMContext &Ctx = RetTy->getContext();
ElementCount VF = getVectorizedTypeVF(RetTy);
VecDesc const *VD = nullptr;
- for (bool Masked : {false, true}) {
- if ((VD = LibInfo->getVectorMappingInfo(LCName, VF, Masked)))
- break;
+
+ if (!IsVectorCall) {
+ for (bool Masked : {false, true}) {
+ if ((VD = LibInfo->getVectorMappingInfo(LCName, VF, Masked)))
+ break;
+ }
+ if (!VD)
+ return std::nullopt;
}
- if (!VD)
- return std::nullopt;
// Cost the call + mask.
auto Cost =
thisT()->getCallInstrCost(nullptr, RetTy, ICA.getArgTypes(), CostKind);
- if (VD->isMasked()) {
+
+ if ((VD && VD->isMasked()) ||
+ (IsVectorCall &&
+ RTLIB::RuntimeLibcallsInfo::hasVectorMaskArgument(LibcallImpl))) {
auto VecTy = VectorType::get(IntegerType::getInt1Ty(Ctx), VF);
Cost += thisT()->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy,
VecTy, {}, CostKind, 0, nullptr, {});
diff --git a/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
index 7173927..fcf7bab 100644
--- a/llvm/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -129,7 +129,7 @@ public:
MCRegister PhysReg;
LaneBitmask LaneMask;
- RegisterMaskPair(MCPhysReg PhysReg, LaneBitmask LaneMask)
+ RegisterMaskPair(MCRegister PhysReg, LaneBitmask LaneMask)
: PhysReg(PhysReg), LaneMask(LaneMask) {}
bool operator==(const RegisterMaskPair &other) const {
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index 0dd4f23c..62d2f22 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1725,9 +1725,17 @@ public:
/// value.
LLVM_ABI bool
expandMultipleResultFPLibCall(RTLIB::Libcall LC, SDNode *Node,
- SmallVectorImpl<SDValue> &Results,
+ SmallVectorImpl<SDValue> &Results, EVT CallType,
std::optional<unsigned> CallRetResNo = {});
+ // FIXME: This should be removed, and the form using RTLIB::Libcall should be
+ // preferred. Callers should resolve the exact type libcall to use.
+ LLVM_ABI bool
+ expandMultipleResultFPLibCall(StringRef LibcallName, CallingConv::ID CC,
+ SDNode *Node, SmallVectorImpl<SDValue> &Results,
+ std::optional<unsigned> CallRetResNo = {},
+ bool IsVectorMasked = false);
+
/// Expand the specified \c ISD::VAARG node as the Legalize pass would.
LLVM_ABI SDValue expandVAArg(SDNode *Node);
@@ -2072,6 +2080,10 @@ public:
/// We use this predicate to simplify operations downstream.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
+ /// Return true if the sign bit of Op is known to be zero, for a
+ /// floating-point value.
+ LLVM_ABI bool SignBitIsZeroFP(SDValue Op, unsigned Depth = 0) const;
+
/// Return true if 'Op & Mask' is known to be zero. We
/// use this predicate to simplify operations downstream. Op and Mask are
/// known to be the same type.
diff --git a/llvm/include/llvm/CodeGen/TargetInstrInfo.h b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
index 7010cff..02cd925 100644
--- a/llvm/include/llvm/CodeGen/TargetInstrInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -113,15 +113,18 @@ struct ExtAddrMode {
///
class LLVM_ABI TargetInstrInfo : public MCInstrInfo {
protected:
+ const TargetRegisterInfo &TRI;
+
/// Subtarget specific sub-array of MCInstrInfo's RegClassByHwModeTables
/// (i.e. the table for the active HwMode). This should be indexed by
/// MCOperandInfo's RegClass field for LookupRegClassByHwMode operands.
const int16_t *const RegClassByHwMode;
- TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
- unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u,
+ TargetInstrInfo(const TargetRegisterInfo &TRI, unsigned CFSetupOpcode = ~0u,
+ unsigned CFDestroyOpcode = ~0u, unsigned CatchRetOpcode = ~0u,
+ unsigned ReturnOpcode = ~0u,
const int16_t *const RegClassByHwModeTable = nullptr)
- : RegClassByHwMode(RegClassByHwModeTable),
+ : TRI(TRI), RegClassByHwMode(RegClassByHwModeTable),
CallFrameSetupOpcode(CFSetupOpcode),
CallFrameDestroyOpcode(CFDestroyOpcode), CatchRetOpcode(CatchRetOpcode),
ReturnOpcode(ReturnOpcode) {}
@@ -131,6 +134,8 @@ public:
TargetInstrInfo &operator=(const TargetInstrInfo &) = delete;
virtual ~TargetInstrInfo();
+ const TargetRegisterInfo &getRegisterInfo() const { return TRI; }
+
static bool isGenericOpcode(unsigned Opc) {
return Opc <= TargetOpcode::GENERIC_OP_END;
}
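A hypothetical target sketch of the new constructor contract (the "Foo" target, its opcodes, and FooRegisterInfo are assumed for illustration); generic code can then call TII.getRegisterInfo() instead of fetching TRI from the subtarget:

    class FooInstrInfo : public TargetInstrInfo {
      FooRegisterInfo RI;

    public:
      FooInstrInfo()
          // Only RI's address is bound here; the base class stores a
          // reference, so member initialization order is not a problem.
          : TargetInstrInfo(RI, Foo::ADJCALLSTACKDOWN, Foo::ADJCALLSTACKUP) {}
    };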
diff --git a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
index a8c7a8a..a1a130a 100644
--- a/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
+++ b/llvm/include/llvm/CodeGen/TargetSubtargetInfo.h
@@ -210,6 +210,10 @@ public:
/// can be overridden.
virtual bool enableJoinGlobalCopies() const;
+ /// Hack to bring up the option. This should be unconditionally true; all
+ /// targets should enable it and then delete this hook.
+ virtual bool enableTerminalRule() const { return false; }
+
/// True if the subtarget should run a scheduler after register allocation.
///
/// By default this queries the PostRAScheduling bit in the scheduling model
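Until then, a target opts in during bring-up by overriding the hook; a one-line sketch (assumed "Foo" subtarget):

    bool FooSubtarget::enableTerminalRule() const { return true; }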
diff --git a/llvm/include/llvm/DebugInfo/GSYM/GsymContext.h b/llvm/include/llvm/DebugInfo/GSYM/GsymContext.h
index e3e9b2b..f9382fa 100644
--- a/llvm/include/llvm/DebugInfo/GSYM/GsymContext.h
+++ b/llvm/include/llvm/DebugInfo/GSYM/GsymContext.h
@@ -12,7 +12,6 @@
#include "llvm/DebugInfo/DIContext.h"
#include <cstdint>
#include <memory>
-#include <string>
namespace llvm {
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
index be8cb92..fc01afc 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -45,7 +45,6 @@
#include <functional>
#include <iterator>
#include <memory>
-#include <optional>
#include <utility>
namespace llvm {
diff --git a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
index a86dca0..12dfb6c 100644
--- a/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
+++ b/llvm/include/llvm/Frontend/OpenMP/ClauseT.h
@@ -50,7 +50,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <iterator>
#include <optional>
#include <tuple>
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h b/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
index 7bec7e0..1ac9ac0 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPConstants.h
@@ -190,6 +190,16 @@ enum class OMPScheduleType {
LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue */ ModifierMask)
};
+/// The fallback types for the dyn_groupprivate clause.
+enum class OMPDynGroupprivateFallbackType : uint64_t {
+ /// Abort the execution.
+ Abort = 0,
+ /// Return null pointer.
+ Null = 1,
+ /// Allocate from a implementation defined memory space.
+ DefaultMem = 2
+};
+
// Default OpenMP mapper name suffix.
inline constexpr const char *OmpDefaultMapperName = ".omp.default.mapper";
diff --git a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
index b3d7ab4..9f77c24 100644
--- a/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
+++ b/llvm/include/llvm/Frontend/OpenMP/OMPIRBuilder.h
@@ -2446,20 +2446,24 @@ public:
/// The number of threads.
ArrayRef<Value *> NumThreads;
/// The size of the dynamic shared memory.
- Value *DynCGGroupMem = nullptr;
+ Value *DynCGroupMem = nullptr;
/// True if the kernel has 'no wait' clause.
bool HasNoWait = false;
+ /// The fallback mechanism for the shared memory.
+ omp::OMPDynGroupprivateFallbackType DynCGroupMemFallback =
+ omp::OMPDynGroupprivateFallbackType::Abort;
// Constructors for TargetKernelArgs.
TargetKernelArgs() = default;
TargetKernelArgs(unsigned NumTargetItems, TargetDataRTArgs RTArgs,
Value *NumIterations, ArrayRef<Value *> NumTeams,
- ArrayRef<Value *> NumThreads, Value *DynCGGroupMem,
- bool HasNoWait)
+ ArrayRef<Value *> NumThreads, Value *DynCGroupMem,
+ bool HasNoWait,
+ omp::OMPDynGroupprivateFallbackType DynCGroupMemFallback)
: NumTargetItems(NumTargetItems), RTArgs(RTArgs),
NumIterations(NumIterations), NumTeams(NumTeams),
- NumThreads(NumThreads), DynCGGroupMem(DynCGGroupMem),
- HasNoWait(HasNoWait) {}
+ NumThreads(NumThreads), DynCGroupMem(DynCGroupMem),
+ HasNoWait(HasNoWait), DynCGroupMemFallback(DynCGroupMemFallback) {}
};
/// Create the kernel args vector used by emitTargetKernel. This function
@@ -3244,6 +3248,10 @@ public:
/// dependency information as passed in the depend clause
/// \param HasNowait Whether the target construct has a `nowait` clause or
/// not.
+ /// \param DynCGroupMem The size of the dynamic groupprivate memory for each
+ /// cgroup.
+ /// \param DynCGroupMemFallback The fallback mechanism to use if the requested
+ /// cgroup memory cannot be provided.
LLVM_ABI InsertPointOrErrorTy createTarget(
const LocationDescription &Loc, bool IsOffloadEntry,
OpenMPIRBuilder::InsertPointTy AllocaIP,
@@ -3255,7 +3263,10 @@ public:
TargetBodyGenCallbackTy BodyGenCB,
TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB,
CustomMapperCallbackTy CustomMapperCB,
- const SmallVector<DependData> &Dependencies, bool HasNowait = false);
+ const SmallVector<DependData> &Dependencies, bool HasNowait = false,
+ Value *DynCGroupMem = nullptr,
+ omp::OMPDynGroupprivateFallbackType DynCGroupMemFallback =
+ omp::OMPDynGroupprivateFallbackType::Abort);
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned. Will create a distribute call
@@ -3654,7 +3665,7 @@ public:
/// \param Name Name of the variable.
LLVM_ABI GlobalVariable *
getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
- unsigned AddressSpace = 0);
+ std::optional<unsigned> AddressSpace = {});
};
/// Class to represent the control flow structure of an OpenMP canonical loop.
diff --git a/llvm/include/llvm/IR/DebugInfo.h b/llvm/include/llvm/IR/DebugInfo.h
index 33e6df0..862293c 100644
--- a/llvm/include/llvm/IR/DebugInfo.h
+++ b/llvm/include/llvm/IR/DebugInfo.h
@@ -108,7 +108,7 @@ public:
LLVM_ABI void processInstruction(const Module &M, const Instruction &I);
/// Process a DILocalVariable.
- LLVM_ABI void processVariable(DILocalVariable *DVI);
+ LLVM_ABI void processVariable(const DILocalVariable *DVI);
/// Process debug info location.
LLVM_ABI void processLocation(const Module &M, const DILocation *Loc);
/// Process a DbgRecord.
@@ -124,7 +124,7 @@ private:
void processCompileUnit(DICompileUnit *CU);
void processScope(DIScope *Scope);
void processType(DIType *DT);
- void processImportedEntity(DIImportedEntity *Import);
+ void processImportedEntity(const DIImportedEntity *Import);
bool addCompileUnit(DICompileUnit *CU);
bool addGlobalVariable(DIGlobalVariableExpression *DIG);
bool addScope(DIScope *Scope);
diff --git a/llvm/include/llvm/IR/DebugInfoMetadata.h b/llvm/include/llvm/IR/DebugInfoMetadata.h
index 7ade6b8..6918b21 100644
--- a/llvm/include/llvm/IR/DebugInfoMetadata.h
+++ b/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -2554,6 +2554,39 @@ public:
replaceOperandWith(7, N.get());
}
+ /// For the given retained node of DISubprogram, applies one of the
+ /// given functions depending on the type of the node.
+ template <typename T, typename FuncLVT, typename FuncLabelT,
+ typename FuncImportedEntityT, typename FuncUnknownT>
+ static T
+ visitRetainedNode(const Metadata *N, FuncLVT &&FuncLV, FuncLabelT &&FuncLabel,
+ FuncImportedEntityT &&FuncIE, FuncUnknownT &&FuncUnknown) {
+ if (const auto *LV = dyn_cast<DILocalVariable>(N))
+ return FuncLV(LV);
+ if (const auto *L = dyn_cast<DILabel>(N))
+ return FuncLabel(L);
+ if (const auto *IE = dyn_cast<DIImportedEntity>(N))
+ return FuncIE(IE);
+ return FuncUnknown(N);
+ }
+
+ /// Returns the scope of the subprogram's retainedNodes.
+ static const DILocalScope *getRetainedNodeScope(const MDNode *N);
+ // For use in Verifier.
+ static const DIScope *getRawRetainedNodeScope(const MDNode *N);
+
+ /// For each retained node, applies one of the given functions depending
+ /// on the type of the node.
+ template <typename FuncLVT, typename FuncLabelT, typename FuncImportedEntityT>
+ void forEachRetainedNode(FuncLVT &&FuncLV, FuncLabelT &&FuncLabel,
+ FuncImportedEntityT &&FuncIE) const {
+ for (MDNode *N : getRetainedNodes())
+ visitRetainedNode<void>(N, FuncLV, FuncLabel, FuncIE,
+ [](const Metadata *N) {
+ llvm_unreachable("Unexpected retained node!");
+ });
+ }
+
/// Check if this subprogram describes the given function.
///
/// FIXME: Should this be looking through bitcasts?
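A usage sketch for the new visitor API (the SP pointer and the Vars/Labels/Imports containers are assumed, not part of this patch):

    SP->forEachRetainedNode(
        [&](const DILocalVariable *LV) { Vars.push_back(LV); },
        [&](const DILabel *L) { Labels.push_back(L); },
        [&](const DIImportedEntity *IE) { Imports.push_back(IE); });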
diff --git a/llvm/include/llvm/IR/DiagnosticInfo.h b/llvm/include/llvm/IR/DiagnosticInfo.h
index a426fb0..1c86d18 100644
--- a/llvm/include/llvm/IR/DiagnosticInfo.h
+++ b/llvm/include/llvm/IR/DiagnosticInfo.h
@@ -26,7 +26,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TypeSize.h"
-#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
diff --git a/llvm/include/llvm/IR/Dominators.h b/llvm/include/llvm/IR/Dominators.h
index bf128a3..1209def 100644
--- a/llvm/include/llvm/IR/Dominators.h
+++ b/llvm/include/llvm/IR/Dominators.h
@@ -32,7 +32,6 @@
#include "llvm/Support/CFGUpdate.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/GenericDomTree.h"
-#include <algorithm>
#include <utility>
namespace llvm {
diff --git a/llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h b/llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h
index 535635a..fcfb2db 100644
--- a/llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h
+++ b/llvm/include/llvm/IR/MemoryModelRelaxationAnnotations.h
@@ -21,7 +21,8 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
-#include <tuple> // for std::pair
+
+#include <utility>
namespace llvm {
diff --git a/llvm/include/llvm/IR/ProfileSummary.h b/llvm/include/llvm/IR/ProfileSummary.h
index 6c087ea..3401215 100644
--- a/llvm/include/llvm/IR/ProfileSummary.h
+++ b/llvm/include/llvm/IR/ProfileSummary.h
@@ -14,7 +14,6 @@
#define LLVM_IR_PROFILESUMMARY_H
#include "llvm/Support/Compiler.h"
-#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>
diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.h b/llvm/include/llvm/IR/RuntimeLibcalls.h
index c822b65..0afe32a 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.h
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.h
@@ -83,16 +83,7 @@ public:
const Triple &TT,
ExceptionHandling ExceptionModel = ExceptionHandling::None,
FloatABI::ABIType FloatABI = FloatABI::Default,
- EABI EABIVersion = EABI::Default, StringRef ABIName = "") {
- // FIXME: The ExceptionModel parameter is to handle the field in
- // TargetOptions. This interface fails to distinguish the forced disable
- // case for targets which support exceptions by default. This should
- // probably be a module flag and removed from TargetOptions.
- if (ExceptionModel == ExceptionHandling::None)
- ExceptionModel = TT.getDefaultExceptionHandling();
-
- initLibcalls(TT, ExceptionModel, FloatABI, EABIVersion, ABIName);
- }
+ EABI EABIVersion = EABI::Default, StringRef ABIName = "");
explicit RuntimeLibcallsInfo(const Module &M);
@@ -170,6 +161,10 @@ public:
getFunctionTy(LLVMContext &Ctx, const Triple &TT, const DataLayout &DL,
RTLIB::LibcallImpl LibcallImpl) const;
+ /// Returns true if the function has a vector mask argument, which is assumed
+ /// to be the last argument.
+ static bool hasVectorMaskArgument(RTLIB::LibcallImpl Impl);
+
private:
LLVM_ABI static iota_range<RTLIB::LibcallImpl>
lookupLibcallImplNameImpl(StringRef Name);
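A sketch of how a caller building such a libcall might consult the new query (Impl, CallArgs, VF, and the IRBuilder B are assumed caller state):

    // Masked vector libcalls expect the all-lanes-active mask as the
    // trailing argument.
    if (RTLIB::RuntimeLibcallsInfo::hasVectorMaskArgument(Impl))
      CallArgs.push_back(B.getAllOnesMask(VF));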
diff --git a/llvm/include/llvm/IR/RuntimeLibcalls.td b/llvm/include/llvm/IR/RuntimeLibcalls.td
index 24c1b03..ee80606 100644
--- a/llvm/include/llvm/IR/RuntimeLibcalls.td
+++ b/llvm/include/llvm/IR/RuntimeLibcalls.td
@@ -182,6 +182,11 @@ foreach FPTy = ["F32", "F64", "F80", "F128", "PPCF128"] in {
def MODF_#FPTy : RuntimeLibcall;
}
+foreach VecTy = ["V4F32", "V2F64", "NXV4F32", "NXV2F64"] in {
+ def SINCOS_#VecTy : RuntimeLibcall;
+ def SINCOSPI_#VecTy : RuntimeLibcall;
+}
+
def FEGETENV : RuntimeLibcall;
def FESETENV : RuntimeLibcall;
@@ -971,10 +976,6 @@ def frexpf : RuntimeLibcallImpl<FREXP_F32>;
def frexp : RuntimeLibcallImpl<FREXP_F64>;
defm frexpl : LibmLongDoubleLibCall;
-def sincospif : RuntimeLibcallImpl<SINCOSPI_F32>;
-def sincospi : RuntimeLibcallImpl<SINCOSPI_F64>;
-defm sincospil : LibmLongDoubleLibCall;
-
def modff : RuntimeLibcallImpl<MODF_F32>;
def modf : RuntimeLibcallImpl<MODF_F64>;
defm modfl : LibmLongDoubleLibCall;
@@ -1051,6 +1052,15 @@ def sincosf : RuntimeLibcallImpl<SINCOS_F32>;
def sincos : RuntimeLibcallImpl<SINCOS_F64>;
defm sincosl : LibmLongDoubleLibCall;
+// Exists in the Sun math library
+def sincospif : RuntimeLibcallImpl<SINCOSPI_F32>;
+def sincospi : RuntimeLibcallImpl<SINCOSPI_F64>;
+defm sincospil : LibmLongDoubleLibCall;
+
+// Exists on macOS
+def __sincospif : RuntimeLibcallImpl<SINCOSPI_F32>;
+def __sincospi : RuntimeLibcallImpl<SINCOSPI_F64>;
+
def bzero : RuntimeLibcallImpl<BZERO>;
def __bzero : RuntimeLibcallImpl<BZERO>;
@@ -1079,6 +1089,40 @@ def __security_check_cookie_arm64ec : RuntimeLibcallImpl<SECURITY_CHECK_COOKIE,
"#__security_check_cookie_arm64ec">;
//===----------------------------------------------------------------------===//
+// sleef calls
+//===----------------------------------------------------------------------===//
+
+defset list<RuntimeLibcallImpl> SleefLibcalls = {
+ def _ZGVnN2vl8l8_sincos : RuntimeLibcallImpl<SINCOS_V2F64>;
+ def _ZGVnN4vl4l4_sincosf : RuntimeLibcallImpl<SINCOS_V4F32>;
+ def _ZGVsNxvl8l8_sincos : RuntimeLibcallImpl<SINCOS_NXV2F64>;
+ def _ZGVsNxvl4l4_sincosf : RuntimeLibcallImpl<SINCOS_NXV4F32>;
+
+ def _ZGVnN4vl4l4_sincospif : RuntimeLibcallImpl<SINCOSPI_V4F32>;
+ def _ZGVnN2vl8l8_sincospi : RuntimeLibcallImpl<SINCOSPI_V2F64>;
+ def _ZGVsNxvl4l4_sincospif : RuntimeLibcallImpl<SINCOSPI_NXV4F32>;
+ def _ZGVsNxvl8l8_sincospi : RuntimeLibcallImpl<SINCOSPI_NXV2F64>;
+}
+
+//===----------------------------------------------------------------------===//
+// ARMPL calls
+//===----------------------------------------------------------------------===//
+
+defset list<RuntimeLibcallImpl> ARMPLLibcalls = {
+ def armpl_vsincosq_f64
+ : RuntimeLibcallImpl<SINCOS_V2F64>; // CallingConv::AArch64_VectorCall
+ def armpl_vsincosq_f32
+ : RuntimeLibcallImpl<SINCOS_V4F32>; // CallingConv::AArch64_VectorCall
+ def armpl_svsincos_f64_x : RuntimeLibcallImpl<SINCOS_NXV2F64>;
+ def armpl_svsincos_f32_x : RuntimeLibcallImpl<SINCOS_NXV4F32>;
+
+ def armpl_vsincospiq_f32 : RuntimeLibcallImpl<SINCOSPI_V4F32>;
+ def armpl_vsincospiq_f64 : RuntimeLibcallImpl<SINCOSPI_V2F64>;
+ def armpl_svsincospi_f32_x : RuntimeLibcallImpl<SINCOSPI_NXV4F32>;
+ def armpl_svsincospi_f64_x : RuntimeLibcallImpl<SINCOSPI_NXV2F64>;
+}
+
+//===----------------------------------------------------------------------===//
// F128 libm Runtime Libcalls
//===----------------------------------------------------------------------===//
@@ -1206,7 +1250,9 @@ defvar DefaultLibcallImpls32 = (add DefaultRuntimeLibcallImpls);
defvar DefaultLibcallImpls64 = (add DefaultRuntimeLibcallImpls,
Int128RTLibcalls);
-defvar DarwinSinCosStret = LibcallImpls<(add __sincosf_stret, __sincos_stret),
+// TODO: Guessing sincospi was added at the same time as sincos_stret
+defvar DarwinSinCosStret = LibcallImpls<(add __sincosf_stret, __sincos_stret,
+ __sincospif, __sincospi),
darwinHasSinCosStret>;
defvar DarwinExp10 = LibcallImpls<(add __exp10f, __exp10), darwinHasExp10>;
@@ -2333,7 +2379,7 @@ defset list<RuntimeLibcallImpl> PPCRuntimeLibcalls = {
defset list<RuntimeLibcallImpl> PPC64AIXCallList = {
def ___memcmp64 : RuntimeLibcallImpl<MEMCMP>;
- def ___memmove64 : RuntimeLibcallImpl<MEMCPY>;
+ def ___memmove64 : RuntimeLibcallImpl<MEMMOVE>;
def ___memset64 : RuntimeLibcallImpl<MEMSET>;
def ___bzero64 : RuntimeLibcallImpl<BZERO>;
def ___strlen64 : RuntimeLibcallImpl<STRLEN>;
diff --git a/llvm/include/llvm/IR/SystemLibraries.h b/llvm/include/llvm/IR/SystemLibraries.h
new file mode 100644
index 0000000..1713b07
--- /dev/null
+++ b/llvm/include/llvm/IR/SystemLibraries.h
@@ -0,0 +1,39 @@
+//===------------------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_SYSTEMLIBRARIES_H
+#define LLVM_IR_SYSTEMLIBRARIES_H
+
+namespace llvm {
+/// List of known vector-functions libraries.
+///
+/// The vector-functions library defines which functions are vectorizable
+/// and with which factor. The library can be specified by either the
+/// frontend or a command-line option, and is then used by
+/// addVectorizableFunctionsFromVecLib to fill in the tables of
+/// vectorizable functions.
+enum class VectorLibrary {
+ NoLibrary, // Don't use any vector library.
+ Accelerate, // Use Accelerate framework.
+ DarwinLibSystemM, // Use Darwin's libsystem_m.
+ LIBMVEC, // GLIBC Vector Math library.
+ MASSV, // IBM MASS vector library.
+ SVML, // Intel short vector math library.
+ SLEEFGNUABI, // SLEEF - SIMD Library for Evaluating Elementary Functions.
+ ArmPL, // Arm Performance Libraries.
+ AMDLIBM // AMD Math Vector library.
+};
+
+/// Command line flag value for the vector math library to use
+///
+/// FIXME: This should come from a module flag, and not be mutually exclusive
+extern VectorLibrary ClVectorLibrary;
+
+} // namespace llvm
+
+#endif // LLVM_IR_SYSTEMLIBRARIES_H
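A note on the new header: it gives the rest of LLVM a frontend-independent way to query the selected vector library. A minimal sketch of a consumer, assuming only the declarations shown above (the helper function itself is hypothetical):

    #include "llvm/IR/SystemLibraries.h"

    using namespace llvm;

    // Hypothetical helper: do the selected mappings use scalable vectors?
    static bool mayUseScalableVectorMappings() {
      return ClVectorLibrary == VectorLibrary::SLEEFGNUABI ||
             ClVectorLibrary == VectorLibrary::ArmPL;
    }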
diff --git a/llvm/include/llvm/MC/DXContainerPSVInfo.h b/llvm/include/llvm/MC/DXContainerPSVInfo.h
index 3a2d294..eb6d9e1 100644
--- a/llvm/include/llvm/MC/DXContainerPSVInfo.h
+++ b/llvm/include/llvm/MC/DXContainerPSVInfo.h
@@ -17,7 +17,6 @@
#include "llvm/TargetParser/Triple.h"
#include <array>
-#include <numeric>
#include <stdint.h>
namespace llvm {
diff --git a/llvm/include/llvm/MC/MCAsmInfo.h b/llvm/include/llvm/MC/MCAsmInfo.h
index 7a2e9ad..ea8ac6d 100644
--- a/llvm/include/llvm/MC/MCAsmInfo.h
+++ b/llvm/include/llvm/MC/MCAsmInfo.h
@@ -401,7 +401,7 @@ protected:
// Generated object files can use all ELF features supported by GNU ld of
// this binutils version and later. INT_MAX means all features can be used,
// regardless of GNU ld support. The default value is referenced by
- // clang/Driver/Options.td.
+ // clang/Options/Options.td.
std::pair<int, int> BinutilsVersion = {2, 26};
/// Should we use the integrated assembler?
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index 144f211..dbae271 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -19,13 +19,11 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SMLoc.h"
-#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
-#include <tuple>
#include <utility>
namespace llvm {
diff --git a/llvm/include/llvm/Option/Arg.h b/llvm/include/llvm/Option/Arg.h
index b1e56b5..496373d 100644
--- a/llvm/include/llvm/Option/Arg.h
+++ b/llvm/include/llvm/Option/Arg.h
@@ -51,7 +51,7 @@ private:
/// Was this argument used to affect compilation?
///
/// This is used to generate an "argument unused" warning (without
- /// clang::driver::options::TargetSpecific) or "unsupported option" error
+ /// clang::options::TargetSpecific) or "unsupported option" error
/// (with TargetSpecific).
mutable unsigned Claimed : 1;
diff --git a/llvm/include/llvm/Passes/PassBuilder.h b/llvm/include/llvm/Passes/PassBuilder.h
index 8538a8b..8fa21f2 100644
--- a/llvm/include/llvm/Passes/PassBuilder.h
+++ b/llvm/include/llvm/Passes/PassBuilder.h
@@ -742,7 +742,7 @@ private:
void addRequiredLTOPreLinkPasses(ModulePassManager &MPM);
void addVectorPasses(OptimizationLevel Level, FunctionPassManager &FPM,
- bool IsFullLTO);
+ ThinOrFullLTOPhase LTOPhase);
static std::optional<std::vector<PipelineElement>>
parsePipelineText(StringRef Text);
diff --git a/llvm/include/llvm/Remarks/YAMLRemarkSerializer.h b/llvm/include/llvm/Remarks/YAMLRemarkSerializer.h
index 69b8f9f..af9d809 100644
--- a/llvm/include/llvm/Remarks/YAMLRemarkSerializer.h
+++ b/llvm/include/llvm/Remarks/YAMLRemarkSerializer.h
@@ -16,7 +16,6 @@
#include "llvm/Remarks/RemarkSerializer.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/YAMLTraits.h"
-#include <optional>
namespace llvm {
namespace remarks {
diff --git a/llvm/include/llvm/Support/FormatVariadic.h b/llvm/include/llvm/Support/FormatVariadic.h
index 8565292..fdd448f 100644
--- a/llvm/include/llvm/Support/FormatVariadic.h
+++ b/llvm/include/llvm/Support/FormatVariadic.h
@@ -37,7 +37,6 @@
#include "llvm/Support/raw_ostream.h"
#include <array>
#include <cstddef>
-#include <optional>
#include <string>
#include <tuple>
#include <utility>
diff --git a/llvm/include/llvm/Support/Jobserver.h b/llvm/include/llvm/Support/Jobserver.h
index 6bee3b5..3c0c045 100644
--- a/llvm/include/llvm/Support/Jobserver.h
+++ b/llvm/include/llvm/Support/Jobserver.h
@@ -68,7 +68,6 @@
#include "llvm/ADT/StringRef.h"
#include <memory>
-#include <string>
namespace llvm {
diff --git a/llvm/include/llvm/Support/SpecialCaseList.h b/llvm/include/llvm/Support/SpecialCaseList.h
index cb8e568de..dee4db5 100644
--- a/llvm/include/llvm/Support/SpecialCaseList.h
+++ b/llvm/include/llvm/Support/SpecialCaseList.h
@@ -126,15 +126,16 @@ protected:
SpecialCaseList &operator=(SpecialCaseList const &) = delete;
private:
+ using Match = std::pair<StringRef, unsigned>;
+ static constexpr Match NotMatched = {"", 0};
+
// Legacy v1 matcher.
class RegexMatcher {
public:
LLVM_ABI Error insert(StringRef Pattern, unsigned LineNumber);
LLVM_ABI void preprocess(bool BySize);
- LLVM_ABI void
- match(StringRef Query,
- llvm::function_ref<void(StringRef Rule, unsigned LineNo)> Cb) const;
+ LLVM_ABI Match match(StringRef Query) const;
struct Reg {
Reg(StringRef Name, unsigned LineNo, Regex &&Rg)
@@ -152,9 +153,7 @@ private:
LLVM_ABI Error insert(StringRef Pattern, unsigned LineNumber);
LLVM_ABI void preprocess(bool BySize);
- LLVM_ABI void
- match(StringRef Query,
- llvm::function_ref<void(StringRef Rule, unsigned LineNo)> Cb) const;
+ LLVM_ABI Match match(StringRef Query) const;
struct Glob {
Glob(StringRef Name, unsigned LineNo, GlobPattern &&Pattern)
@@ -168,11 +167,10 @@ private:
RadixTree<iterator_range<StringRef::const_iterator>,
RadixTree<iterator_range<StringRef::const_reverse_iterator>,
- SmallVector<const GlobMatcher::Glob *, 1>>>
+ SmallVector<int, 1>>>
PrefixSuffixToGlob;
- RadixTree<iterator_range<StringRef::const_iterator>,
- SmallVector<const GlobMatcher::Glob *, 1>>
+ RadixTree<iterator_range<StringRef::const_iterator>, SmallVector<int, 1>>
SubstrToGlob;
};
@@ -184,14 +182,10 @@ private:
LLVM_ABI Error insert(StringRef Pattern, unsigned LineNumber);
LLVM_ABI void preprocess(bool BySize);
- LLVM_ABI void
- match(StringRef Query,
- llvm::function_ref<void(StringRef Rule, unsigned LineNo)> Cb) const;
+ LLVM_ABI Match match(StringRef Query) const;
LLVM_ABI bool matchAny(StringRef Query) const {
- bool R = false;
- match(Query, [&](StringRef, unsigned) { R = true; });
- return R;
+ return match(Query) != NotMatched;
}
std::variant<RegexMatcher, GlobMatcher> M;
@@ -201,17 +195,22 @@ private:
using SectionEntries = StringMap<StringMap<Matcher>>;
protected:
- struct Section {
+ class Section {
+ public:
Section(StringRef Str, unsigned FileIdx, bool UseGlobs)
: SectionMatcher(UseGlobs, /*RemoveDotSlash=*/false), SectionStr(Str),
FileIdx(FileIdx) {}
Section(Section &&) = default;
- Matcher SectionMatcher;
- SectionEntries Entries;
- std::string SectionStr;
- unsigned FileIdx;
+ // Returns the name of the section, i.e. its entire string inside [].
+ StringRef name() const { return SectionStr; }
+
+ // Returns true if string 'Name' matches the section name, interpreted as a glob.
+ LLVM_ABI bool matchName(StringRef Name) const;
+
+ // Returns the sequence number of the file where this section is defined.
+ unsigned fileIndex() const { return FileIdx; }
// Helper method to search by Prefix, Query, and Category. Returns
// 1-based line number on which rule is defined, or 0 if there is no match.
@@ -223,11 +222,19 @@ protected:
LLVM_ABI StringRef getLongestMatch(StringRef Prefix, StringRef Query,
StringRef Category) const;
+ /// Returns true if the section has any entries for the given prefix.
+ LLVM_ABI bool hasPrefix(StringRef Prefix) const;
+
private:
friend class SpecialCaseList;
LLVM_ABI void preprocess(bool OrderBySize);
LLVM_ABI const SpecialCaseList::Matcher *
findMatcher(StringRef Prefix, StringRef Category) const;
+
+ Matcher SectionMatcher;
+ std::string SectionStr;
+ SectionEntries Entries;
+ unsigned FileIdx;
};
ArrayRef<const Section> sections() const { return Sections; }
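With the callback-based match() replaced by a Match pair, a miss is signalled by the NotMatched sentinel (empty rule, line 0) instead of an un-invoked callback. A sketch of the new calling convention, assuming a populated Matcher M:

    // Match is std::pair<StringRef, unsigned>; NotMatched == {"", 0}.
    auto [Rule, LineNo] = M.match(Query);
    if (LineNo != 0)
      errs() << Query << " hit rule '" << Rule << "' (line " << LineNo << ")\n";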
diff --git a/llvm/include/llvm/Support/TypeSize.h b/llvm/include/llvm/Support/TypeSize.h
index 0a7ae15..421d661 100644
--- a/llvm/include/llvm/Support/TypeSize.h
+++ b/llvm/include/llvm/Support/TypeSize.h
@@ -20,7 +20,6 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <cassert>
#include <cstdint>
#include <type_traits>
diff --git a/llvm/include/llvm/Support/UnicodeCharRanges.h b/llvm/include/llvm/Support/UnicodeCharRanges.h
index 2b5fc83..03515cd 100644
--- a/llvm/include/llvm/Support/UnicodeCharRanges.h
+++ b/llvm/include/llvm/Support/UnicodeCharRanges.h
@@ -12,7 +12,6 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#define DEBUG_TYPE "unicode"
diff --git a/llvm/include/llvm/TableGen/DirectiveEmitter.h b/llvm/include/llvm/TableGen/DirectiveEmitter.h
index ce3e87e..2080f75 100644
--- a/llvm/include/llvm/TableGen/DirectiveEmitter.h
+++ b/llvm/include/llvm/TableGen/DirectiveEmitter.h
@@ -20,7 +20,6 @@
#include "llvm/Frontend/Directive/Spelling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TableGen/Record.h"
-#include <algorithm>
#include <string>
#include <vector>
diff --git a/llvm/include/llvm/Transforms/Scalar/DropUnnecessaryAssumes.h b/llvm/include/llvm/Transforms/Scalar/DropUnnecessaryAssumes.h
index 4ff442ff..54ddcc0 100644
--- a/llvm/include/llvm/Transforms/Scalar/DropUnnecessaryAssumes.h
+++ b/llvm/include/llvm/Transforms/Scalar/DropUnnecessaryAssumes.h
@@ -19,7 +19,13 @@ namespace llvm {
struct DropUnnecessaryAssumesPass
: public PassInfoMixin<DropUnnecessaryAssumesPass> {
+ DropUnnecessaryAssumesPass(bool DropDereferenceable = false)
+ : DropDereferenceable(DropDereferenceable) {}
+
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+
+private:
+ bool DropDereferenceable;
};
} // end namespace llvm
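With the new constructor flag, a pipeline can opt in to dropping dereferenceable assumptions. A minimal new-PM sketch, assuming nothing beyond this header and the usual pass-manager setup:

    FunctionPassManager FPM;
    // Default: dereferenceable assumptions are kept.
    FPM.addPass(DropUnnecessaryAssumesPass());
    // Opt in to dropping them as well.
    FPM.addPass(DropUnnecessaryAssumesPass(/*DropDereferenceable=*/true));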
diff --git a/llvm/include/llvm/Transforms/Scalar/JumpThreading.h b/llvm/include/llvm/Transforms/Scalar/JumpThreading.h
index a03a384..1a19eb9 100644
--- a/llvm/include/llvm/Transforms/Scalar/JumpThreading.h
+++ b/llvm/include/llvm/Transforms/Scalar/JumpThreading.h
@@ -24,7 +24,6 @@
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
-#include <optional>
#include <utility>
namespace llvm {
diff --git a/llvm/include/llvm/Transforms/Scalar/Scalarizer.h b/llvm/include/llvm/Transforms/Scalar/Scalarizer.h
index 12513c2..35c9adb 100644
--- a/llvm/include/llvm/Transforms/Scalar/Scalarizer.h
+++ b/llvm/include/llvm/Transforms/Scalar/Scalarizer.h
@@ -20,7 +20,6 @@
#include "llvm/IR/PassManager.h"
#include "llvm/Support/Compiler.h"
-#include <optional>
namespace llvm {
diff --git a/llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h b/llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h
index cb48bb0..19b573d 100644
--- a/llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h
+++ b/llvm/include/llvm/Transforms/Utils/LowerVectorIntrinsics.h
@@ -14,7 +14,6 @@
#define LLVM_TRANSFORMS_UTILS_LOWERVECTORINTRINSICS_H
#include <cstdint>
-#include <optional>
namespace llvm {
diff --git a/llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h b/llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h
index cfcd161..47aa2ff 100644
--- a/llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h
+++ b/llvm/include/llvm/Transforms/Utils/SplitModuleByCategory.h
@@ -16,7 +16,6 @@
#include <memory>
#include <optional>
-#include <string>
namespace llvm {
diff --git a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h
index 4385df5..0503966 100644
--- a/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h
+++ b/llvm/include/llvm/Transforms/Vectorize/SandboxVectorizer/InstrMaps.h
@@ -19,7 +19,6 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Vectorize/SandboxVectorizer/VecUtils.h"
-#include <algorithm>
namespace llvm::sandboxir {
diff --git a/llvm/include/llvm/XRay/FDRRecordConsumer.h b/llvm/include/llvm/XRay/FDRRecordConsumer.h
index 13bb711..4ff65f0 100644
--- a/llvm/include/llvm/XRay/FDRRecordConsumer.h
+++ b/llvm/include/llvm/XRay/FDRRecordConsumer.h
@@ -11,7 +11,6 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Error.h"
#include "llvm/XRay/FDRRecords.h"
-#include <algorithm>
#include <memory>
#include <vector>
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index 88ebd65..bff9b62 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -89,7 +89,6 @@ add_llvm_component_library(LLVMAnalysis
InlineCost.cpp
InlineAdvisor.cpp
InlineOrder.cpp
- InlineSizeEstimatorAnalysis.cpp
InstCount.cpp
InstructionPrecedenceTracking.cpp
InstructionSimplify.cpp
diff --git a/llvm/lib/Analysis/DXILResource.cpp b/llvm/lib/Analysis/DXILResource.cpp
index 27114e0..033f516 100644
--- a/llvm/lib/Analysis/DXILResource.cpp
+++ b/llvm/lib/Analysis/DXILResource.cpp
@@ -23,7 +23,6 @@
#include "llvm/Support/DXILABI.h"
#include "llvm/Support/FormatVariadic.h"
#include <cstdint>
-#include <optional>
#define DEBUG_TYPE "dxil-resource"
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 67e38ab..d2be805 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -16,7 +16,6 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
#include "llvm/Analysis/NoInferenceModelRunner.h"
@@ -89,9 +88,6 @@ struct InlineEvent {
/// error, even if AdvisedDecision were true, otherwise it agrees with
/// AdvisedDecision.
bool Effect = false;
-
- /// What the change in size was: size_after - size_before
- int64_t Reward = 0;
};
/// Collect data we may use for training a model.
@@ -150,31 +146,15 @@ public:
GetModelRunner,
std::function<bool(CallBase &)> GetDefaultAdvice);
- size_t getTotalSizeEstimate();
-
- void updateNativeSizeEstimate(int64_t Change) {
- *CurrentNativeSize += Change;
- }
- void resetNativeSize(Function *F) {
- PreservedAnalyses PA = PreservedAnalyses::all();
- PA.abandon<InlineSizeEstimatorAnalysis>();
- FAM.invalidate(*F, PA);
- }
-
std::unique_ptr<MLInlineAdvice>
getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
- std::optional<size_t> getNativeSizeEstimate(const Function &F) const;
-
private:
bool isLogging() const { return !!Logger; }
std::unique_ptr<MLInlineAdvice> getMandatoryAdviceImpl(CallBase &CB) override;
const bool IsDoingInference;
std::unique_ptr<TrainingLogger> Logger;
-
- const std::optional<int32_t> InitialNativeSize;
- std::optional<int32_t> CurrentNativeSize;
};
/// A variant of MLInlineAdvice that tracks all non-trivial inlining
@@ -183,13 +163,9 @@ class LoggingMLInlineAdvice : public MLInlineAdvice {
public:
LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
OptimizationRemarkEmitter &ORE, bool Recommendation,
- TrainingLogger &Logger,
- std::optional<size_t> CallerSizeEstimateBefore,
- std::optional<size_t> CalleeSizeEstimateBefore,
- bool DefaultDecision, bool Mandatory = false)
+ TrainingLogger &Logger, bool DefaultDecision,
+ bool Mandatory = false)
: MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
- CallerSizeEstimateBefore(CallerSizeEstimateBefore),
- CalleeSizeEstimateBefore(CalleeSizeEstimateBefore),
DefaultDecision(DefaultDecision), Mandatory(Mandatory) {}
virtual ~LoggingMLInlineAdvice() = default;
@@ -200,59 +176,35 @@ private:
}
void recordInliningImpl() override {
MLInlineAdvice::recordInliningImpl();
- getAdvisor()->resetNativeSize(Caller);
- int Reward = std::numeric_limits<int>::max();
- if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
- !getAdvisor()->isForcedToStop()) {
- int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller) +
- *CalleeSizeEstimateBefore;
- Reward = NativeSizeAfter -
- (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
- getAdvisor()->updateNativeSizeEstimate(Reward);
- }
- log(Reward, /*Success=*/true);
+ log(/*Success=*/true);
}
void recordInliningWithCalleeDeletedImpl() override {
MLInlineAdvice::recordInliningWithCalleeDeletedImpl();
- getAdvisor()->resetNativeSize(Caller);
- if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
- !getAdvisor()->isForcedToStop()) {
- int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller);
- int Reward = NativeSizeAfter -
- (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
- getAdvisor()->updateNativeSizeEstimate(Reward);
- log(Reward, /*Success=*/true);
- } else {
- log(NoReward, /*Success=*/true);
- }
+ log(/*Success=*/true);
}
void recordUnsuccessfulInliningImpl(const InlineResult &Result) override {
MLInlineAdvice::recordUnsuccessfulInliningImpl(Result);
- log(NoReward, /*Success=*/false);
+ log(/*Success=*/false);
}
void recordUnattemptedInliningImpl() override {
MLInlineAdvice::recordUnattemptedInliningImpl();
- log(NoReward, /*Success=*/false);
+ log(/*Success=*/false);
}
- void log(int64_t Reward, bool Success) {
+ void log(bool Success) {
if (Mandatory)
return;
InlineEvent Event;
Event.AdvisedDecision = isInliningRecommended();
Event.DefaultDecision = DefaultDecision;
Event.Effect = Success;
- Event.Reward = Reward;
Logger.logInlineEvent(Event, getAdvisor()->getModelRunner());
}
- static const int64_t NoReward = 0;
TrainingLogger &Logger;
- const std::optional<size_t> CallerSizeEstimateBefore;
- const std::optional<size_t> CalleeSizeEstimateBefore;
const int64_t DefaultDecision;
const int64_t Mandatory;
};
@@ -296,9 +248,9 @@ TrainingLogger::TrainingLogger(StringRef LogFileName,
if (EC)
dbgs() << (EC.message() + ":" + TrainingLog);
- L = std::make_unique<Logger>(
- std::move(OS), FT, TensorSpec::createSpec<int64_t>(RewardName, {1}),
- InlineSizeEstimatorAnalysis::isEvaluatorRequested());
+ L = std::make_unique<Logger>(std::move(OS), FT,
+ TensorSpec::createSpec<int64_t>(RewardName, {1}),
+ false);
L->switchContext("");
}
@@ -326,8 +278,6 @@ void TrainingLogger::logInlineEvent(const InlineEvent &Event,
L->logTensorValue(DecisionPos,
reinterpret_cast<const char *>(&Event.AdvisedDecision));
L->endObservation();
- if (InlineSizeEstimatorAnalysis::isEvaluatorRequested())
- L->logReward(Event.Reward);
// For debugging / later use
Effects.push_back(Event.Effect);
@@ -340,9 +290,7 @@ DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
GetModelRunner,
std::function<bool(CallBase &)> GetDefaultAdvice)
: MLInlineAdvisor(M, MAM, GetModelRunner, GetDefaultAdvice),
- IsDoingInference(isa<ModelUnderTrainingRunner>(getModelRunner())),
- InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0),
- CurrentNativeSize(InitialNativeSize) {
+ IsDoingInference(isa<ModelUnderTrainingRunner>(getModelRunner())) {
// We cannot have the case of neither inference nor logging.
if (!TrainingLog.empty())
Logger = std::make_unique<TrainingLogger>(
@@ -351,29 +299,12 @@ DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
assert(IsDoingInference || isLogging());
}
-std::optional<size_t>
-DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
- if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
- return std::nullopt;
- auto &R =
- FAM.getResult<InlineSizeEstimatorAnalysis>(const_cast<Function &>(F));
- if (!R) {
- F.getParent()->getContext().emitError(
- "Native size estimator is not present.");
- return 0;
- }
- return *R;
-}
-
std::unique_ptr<MLInlineAdvice>
DevelopmentModeMLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {
return std::make_unique<LoggingMLInlineAdvice>(
/*Advisor=*/this,
/*CB=*/CB, /*ORE=*/getCallerORE(CB), /*Recommendation=*/true,
/*Logger=*/*Logger,
- /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
- /*CalleeSizeEstimateBefore=*/
- getNativeSizeEstimate(*CB.getCalledFunction()),
/*DefaultDecision=*/true, /*Mandatory*/ true);
}
@@ -391,24 +322,9 @@ DevelopmentModeMLInlineAdvisor::getAdviceFromModel(
/*Advisor=*/this,
/*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/Recommendation,
/*Logger=*/*Logger,
- /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
- /*CalleeSizeEstimateBefore=*/
- getNativeSizeEstimate(*CB.getCalledFunction()),
/*DefaultDecision=*/DefaultAdvice);
}
-size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
- if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
- return 0;
- size_t Ret = 0;
- for (auto &F : M) {
- if (F.isDeclaration())
- continue;
- Ret += *getNativeSizeEstimate(F);
- }
- return Ret;
-}
-
std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
Module &M, ModuleAnalysisManager &MAM,
std::function<bool(CallBase &)> GetDefaultAdvice) {
diff --git a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
deleted file mode 100644
index fc635726a..0000000
--- a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
+++ /dev/null
@@ -1,281 +0,0 @@
-//===- InlineSizeEstimatorAnalysis.cpp - IR to native size from ML model --===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This implements feature and label extraction for offline supervised learning
-// of a IR to native size model.
-//
-//===----------------------------------------------------------------------===//
-#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
-
-#ifdef LLVM_HAVE_TFLITE
-#include "llvm/Analysis/Utils/TFUtils.h"
-#endif
-#include "llvm/IR/Function.h"
-#include "llvm/IR/PassManager.h"
-#include "llvm/Support/raw_ostream.h"
-
-using namespace llvm;
-
-AnalysisKey InlineSizeEstimatorAnalysis::Key;
-
-#ifdef LLVM_HAVE_TFLITE
-#include "llvm/Analysis/LoopInfo.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/IR/BasicBlock.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/Support/Casting.h"
-#include "llvm/Support/CommandLine.h"
-#include <algorithm>
-#include <deque>
-#include <optional>
-
-static cl::opt<std::string> TFIR2NativeModelPath(
- "ml-inliner-ir2native-model", cl::Hidden,
- cl::desc("Path to saved model evaluating native size from IR."));
-
-#define DEBUG_TYPE "inline-size-estimator"
-namespace {
-unsigned getMaxInstructionID() {
-#define LAST_OTHER_INST(NR) return NR;
-#include "llvm/IR/Instruction.def"
-}
-
-class IRToNativeSizeLearning {
-public:
- enum class NamedFeatureIndex : size_t {
- InitialSize,
- Blocks,
- Calls,
- IsLocal,
- IsLinkOnceODR,
- IsLinkOnce,
- Loops,
- MaxLoopDepth,
- MaxDomTreeLevel,
-
- NumNamedFeatures
- };
- static const size_t NumNamedFeatures =
- static_cast<size_t>(NamedFeatureIndex::NumNamedFeatures);
- struct FunctionFeatures {
- static const size_t FeatureCount;
-
- std::array<int32_t, NumNamedFeatures> NamedFeatures = {0};
- std::vector<int32_t> InstructionHistogram;
- std::vector<int32_t> InstructionPairHistogram;
-
- void fillTensor(int32_t *Ptr) const;
- int32_t &operator[](NamedFeatureIndex Pos) {
- return NamedFeatures[static_cast<size_t>(Pos)];
- }
- };
- IRToNativeSizeLearning() = default;
-
- static FunctionFeatures getFunctionFeatures(Function &F,
- FunctionAnalysisManager &FAM);
-};
-
-// This is a point in time - we determined including these pairs of
-// consecutive instructions (in the IR layout available at inline time) as
-// features improves the model performance. We want to move away from manual
-// feature selection.
-// The array is given in opcode pairs rather than labels because 1) labels
-// weren't readily available, and 2) the successions were hand - extracted.
-//
-// This array must be sorted.
-static const std::array<std::pair<size_t, size_t>, 137>
- ImportantInstructionSuccessions{
- {{1, 1}, {1, 4}, {1, 5}, {1, 7}, {1, 8}, {1, 9}, {1, 11},
- {1, 12}, {1, 13}, {1, 14}, {1, 18}, {1, 20}, {1, 22}, {1, 24},
- {1, 25}, {1, 26}, {1, 27}, {1, 28}, {1, 29}, {1, 30}, {1, 31},
- {1, 32}, {1, 33}, {1, 34}, {1, 39}, {1, 40}, {1, 42}, {1, 45},
- {2, 1}, {2, 2}, {2, 13}, {2, 28}, {2, 29}, {2, 32}, {2, 33},
- {2, 34}, {2, 38}, {2, 48}, {2, 49}, {2, 53}, {2, 55}, {2, 56},
- {13, 2}, {13, 13}, {13, 26}, {13, 33}, {13, 34}, {13, 56}, {15, 27},
- {28, 2}, {28, 48}, {28, 53}, {29, 2}, {29, 33}, {29, 56}, {31, 31},
- {31, 33}, {31, 34}, {31, 49}, {32, 1}, {32, 2}, {32, 13}, {32, 15},
- {32, 28}, {32, 29}, {32, 32}, {32, 33}, {32, 34}, {32, 39}, {32, 40},
- {32, 48}, {32, 49}, {32, 53}, {32, 56}, {33, 1}, {33, 2}, {33, 32},
- {33, 33}, {33, 34}, {33, 49}, {33, 53}, {33, 56}, {34, 1}, {34, 2},
- {34, 32}, {34, 33}, {34, 34}, {34, 49}, {34, 53}, {34, 56}, {38, 34},
- {39, 57}, {40, 34}, {47, 15}, {47, 49}, {48, 2}, {48, 34}, {48, 56},
- {49, 1}, {49, 2}, {49, 28}, {49, 32}, {49, 33}, {49, 34}, {49, 39},
- {49, 49}, {49, 56}, {53, 1}, {53, 2}, {53, 28}, {53, 34}, {53, 53},
- {53, 57}, {55, 1}, {55, 28}, {55, 34}, {55, 53}, {55, 55}, {55, 56},
- {56, 1}, {56, 2}, {56, 7}, {56, 13}, {56, 32}, {56, 33}, {56, 34},
- {56, 49}, {56, 53}, {56, 56}, {56, 64}, {57, 34}, {57, 56}, {57, 57},
- {64, 1}, {64, 64}, {65, 1}, {65, 65}}};
-
-// We have: 9 calculated features (the features here); 1 feature for each
-// instruction opcode; and 1 feature for each manually-identified sequence.
-// For the latter 2, we build a histogram: we count the number of
-// occurrences of each instruction opcode or succession of instructions,
-// respectively.
-// Note that instruction opcodes start from 1. For convenience, we also have an
-// always 0 feature for the '0' opcode, hence the extra 1.
-const size_t IRToNativeSizeLearning::FunctionFeatures::FeatureCount =
- ImportantInstructionSuccessions.size() + getMaxInstructionID() + 1 +
- IRToNativeSizeLearning::NumNamedFeatures;
-
-size_t getSize(Function &F, TargetTransformInfo &TTI) {
- size_t Ret = 0;
- for (const auto &BB : F)
- for (const auto &I : BB)
- Ret += TTI.getInstructionCost(
- &I, TargetTransformInfo::TargetCostKind::TCK_CodeSize)
- .getValue();
- return Ret;
-}
-
-size_t getSize(Function &F, FunctionAnalysisManager &FAM) {
- auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
- return getSize(F, TTI);
-}
-
-unsigned getMaxDominatorTreeDepth(const Function &F,
- const DominatorTree &Tree) {
- unsigned Ret = 0;
- for (const auto &BB : F)
- if (const auto *TN = Tree.getNode(&BB))
- Ret = std::max(Ret, TN->getLevel());
- return Ret;
-}
-} // namespace
-
-IRToNativeSizeLearning::FunctionFeatures
-IRToNativeSizeLearning::getFunctionFeatures(Function &F,
- FunctionAnalysisManager &FAM) {
- assert(llvm::is_sorted(ImportantInstructionSuccessions) &&
- "expected function features are sorted");
-
- auto &DomTree = FAM.getResult<DominatorTreeAnalysis>(F);
- FunctionFeatures FF;
- size_t InstrCount = getMaxInstructionID() + 1;
- FF.InstructionHistogram.resize(InstrCount);
-
- FF.InstructionPairHistogram.resize(ImportantInstructionSuccessions.size());
-
- int StartID = 0;
- int LastID = StartID;
- auto getPairIndex = [](size_t a, size_t b) {
- auto I = llvm::find(ImportantInstructionSuccessions, std::make_pair(a, b));
- if (I == ImportantInstructionSuccessions.end())
- return -1;
- return static_cast<int>(
- std::distance(ImportantInstructionSuccessions.begin(), I));
- };
-
- // We don't want debug calls, because they'd just add noise.
- for (const auto &BB : F) {
- for (const auto &I : BB.instructionsWithoutDebug()) {
- auto ID = I.getOpcode();
-
- ++FF.InstructionHistogram[ID];
- int PairIndex = getPairIndex(LastID, ID);
- if (PairIndex >= 0)
- ++FF.InstructionPairHistogram[PairIndex];
- LastID = ID;
- if (isa<CallBase>(I))
- ++FF[NamedFeatureIndex::Calls];
- }
- }
-
- FF[NamedFeatureIndex::InitialSize] = getSize(F, FAM);
- FF[NamedFeatureIndex::IsLocal] = F.hasLocalLinkage();
- FF[NamedFeatureIndex::IsLinkOnceODR] = F.hasLinkOnceODRLinkage();
- FF[NamedFeatureIndex::IsLinkOnce] = F.hasLinkOnceLinkage();
- FF[NamedFeatureIndex::Blocks] = F.size();
- auto &LI = FAM.getResult<LoopAnalysis>(F);
- FF[NamedFeatureIndex::Loops] = std::distance(LI.begin(), LI.end());
- for (auto &L : LI)
- FF[NamedFeatureIndex::MaxLoopDepth] =
- std::max(FF[NamedFeatureIndex::MaxLoopDepth],
- static_cast<int32_t>(L->getLoopDepth()));
- FF[NamedFeatureIndex::MaxDomTreeLevel] = getMaxDominatorTreeDepth(F, DomTree);
- return FF;
-}
-
-void IRToNativeSizeLearning::FunctionFeatures::fillTensor(int32_t *Ptr) const {
- std::copy(NamedFeatures.begin(), NamedFeatures.end(), Ptr);
- Ptr += NamedFeatures.size();
- std::copy(InstructionHistogram.begin(), InstructionHistogram.end(), Ptr);
- Ptr += InstructionHistogram.size();
- std::copy(InstructionPairHistogram.begin(), InstructionPairHistogram.end(),
- Ptr);
-}
-
-bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() {
- return !TFIR2NativeModelPath.empty();
-}
-
-InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() {
- if (!isEvaluatorRequested()) {
- return;
- }
- std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
- "serving_default_input_1",
- {1, static_cast<int64_t>(
- IRToNativeSizeLearning::FunctionFeatures::FeatureCount)})};
- std::vector<TensorSpec> OutputSpecs{
- TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
- Evaluator = std::make_unique<TFModelEvaluator>(
- TFIR2NativeModelPath.getValue().c_str(), InputSpecs, OutputSpecs);
- if (!Evaluator || !Evaluator->isValid()) {
- Evaluator.reset();
- return;
- }
-}
-
-InlineSizeEstimatorAnalysis::Result
-InlineSizeEstimatorAnalysis::run(const Function &F,
- FunctionAnalysisManager &FAM) {
- if (!Evaluator)
- return std::nullopt;
- auto Features = IRToNativeSizeLearning::getFunctionFeatures(
- const_cast<Function &>(F), FAM);
- int32_t *V = Evaluator->getInput<int32_t>(0);
- Features.fillTensor(V);
- auto ER = Evaluator->evaluate();
- if (!ER)
- return std::nullopt;
- float Ret = *ER->getTensorValue<float>(0);
- if (Ret < 0.0)
- Ret = 0.0;
- return static_cast<size_t>(Ret);
-}
-
-InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() {}
-InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis(
- InlineSizeEstimatorAnalysis &&Other)
- : Evaluator(std::move(Other.Evaluator)) {}
-
-#else
-namespace llvm {
-class TFModelEvaluator {};
-} // namespace llvm
-InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() = default;
-InlineSizeEstimatorAnalysis ::InlineSizeEstimatorAnalysis(
- InlineSizeEstimatorAnalysis &&) {}
-InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() = default;
-InlineSizeEstimatorAnalysis::Result
-InlineSizeEstimatorAnalysis::run(const Function &F,
- FunctionAnalysisManager &FAM) {
- return std::nullopt;
-}
-bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() { return false; }
-#endif
-
-PreservedAnalyses
-InlineSizeEstimatorAnalysisPrinterPass::run(Function &F,
- FunctionAnalysisManager &AM) {
- OS << "[InlineSizeEstimatorAnalysis] size estimate for " << F.getName()
- << ": " << AM.getResult<InlineSizeEstimatorAnalysis>(F) << "\n";
- return PreservedAnalyses::all();
-}
diff --git a/llvm/lib/Analysis/TFLiteUtils.cpp b/llvm/lib/Analysis/TFLiteUtils.cpp
index 2762e22..fcef1c8 100644
--- a/llvm/lib/Analysis/TFLiteUtils.cpp
+++ b/llvm/lib/Analysis/TFLiteUtils.cpp
@@ -30,7 +30,6 @@
#include "tensorflow/lite/logger.h"
#include <cassert>
-#include <numeric>
#include <optional>
using namespace llvm;
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 74f3a7d..f97abc9 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -15,33 +15,11 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/SystemLibraries.h"
#include "llvm/InitializePasses.h"
-#include "llvm/Support/CommandLine.h"
#include "llvm/TargetParser/Triple.h"
using namespace llvm;
-static cl::opt<TargetLibraryInfoImpl::VectorLibrary> ClVectorLibrary(
- "vector-library", cl::Hidden, cl::desc("Vector functions library"),
- cl::init(TargetLibraryInfoImpl::NoLibrary),
- cl::values(clEnumValN(TargetLibraryInfoImpl::NoLibrary, "none",
- "No vector functions library"),
- clEnumValN(TargetLibraryInfoImpl::Accelerate, "Accelerate",
- "Accelerate framework"),
- clEnumValN(TargetLibraryInfoImpl::DarwinLibSystemM,
- "Darwin_libsystem_m", "Darwin libsystem_m"),
- clEnumValN(TargetLibraryInfoImpl::LIBMVEC, "LIBMVEC",
- "GLIBC Vector Math library"),
- clEnumValN(TargetLibraryInfoImpl::MASSV, "MASSV",
- "IBM MASS vector library"),
- clEnumValN(TargetLibraryInfoImpl::SVML, "SVML",
- "Intel SVML library"),
- clEnumValN(TargetLibraryInfoImpl::SLEEFGNUABI, "sleefgnuabi",
- "SIMD Library for Evaluating Elementary Functions"),
- clEnumValN(TargetLibraryInfoImpl::ArmPL, "ArmPL",
- "Arm Performance Libraries"),
- clEnumValN(TargetLibraryInfoImpl::AMDLIBM, "AMDLIBM",
- "AMD vector math library")));
-
StringLiteral const TargetLibraryInfoImpl::StandardNames[LibFunc::NumLibFuncs] =
{
#define TLI_DEFINE_STRING
@@ -1392,15 +1370,15 @@ const VecDesc VecFuncs_AMDLIBM[] = {
void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
enum VectorLibrary VecLib, const llvm::Triple &TargetTriple) {
switch (VecLib) {
- case Accelerate: {
+ case VectorLibrary::Accelerate: {
addVectorizableFunctions(VecFuncs_Accelerate);
break;
}
- case DarwinLibSystemM: {
+ case VectorLibrary::DarwinLibSystemM: {
addVectorizableFunctions(VecFuncs_DarwinLibSystemM);
break;
}
- case LIBMVEC: {
+ case VectorLibrary::LIBMVEC: {
switch (TargetTriple.getArch()) {
default:
break;
@@ -1415,15 +1393,15 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
}
break;
}
- case MASSV: {
+ case VectorLibrary::MASSV: {
addVectorizableFunctions(VecFuncs_MASSV);
break;
}
- case SVML: {
+ case VectorLibrary::SVML: {
addVectorizableFunctions(VecFuncs_SVML);
break;
}
- case SLEEFGNUABI: {
+ case VectorLibrary::SLEEFGNUABI: {
switch (TargetTriple.getArch()) {
default:
break;
@@ -1439,7 +1417,7 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
}
break;
}
- case ArmPL: {
+ case VectorLibrary::ArmPL: {
switch (TargetTriple.getArch()) {
default:
break;
@@ -1450,11 +1428,11 @@ void TargetLibraryInfoImpl::addVectorizableFunctionsFromVecLib(
}
break;
}
- case AMDLIBM: {
+ case VectorLibrary::AMDLIBM: {
addVectorizableFunctions(VecFuncs_AMDLIBM);
break;
}
- case NoLibrary:
+ case VectorLibrary::NoLibrary:
break;
}
}
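Call sites now spell out the scoped enumerators from SystemLibraries.h. A sketch of selecting the SLEEF mappings, assuming an existing Triple TT:

    TargetLibraryInfoImpl TLII(TT);
    TLII.addVectorizableFunctionsFromVecLib(VectorLibrary::SLEEFGNUABI, TT);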
diff --git a/llvm/lib/Analysis/TrainingLogger.cpp b/llvm/lib/Analysis/TrainingLogger.cpp
index 344ca92..39f79cf 100644
--- a/llvm/lib/Analysis/TrainingLogger.cpp
+++ b/llvm/lib/Analysis/TrainingLogger.cpp
@@ -23,7 +23,6 @@
#include "llvm/Support/raw_ostream.h"
#include <cassert>
-#include <numeric>
using namespace llvm;
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 567acf7..fde48a3 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1544,18 +1544,8 @@ void DwarfDebug::ensureAbstractEntityIsCreatedIfScoped(DwarfCompileUnit &CU,
}
static const DILocalScope *getRetainedNodeScope(const MDNode *N) {
- const DIScope *S;
- if (const auto *LV = dyn_cast<DILocalVariable>(N))
- S = LV->getScope();
- else if (const auto *L = dyn_cast<DILabel>(N))
- S = L->getScope();
- else if (const auto *IE = dyn_cast<DIImportedEntity>(N))
- S = IE->getScope();
- else
- llvm_unreachable("Unexpected retained node!");
-
// Ensure the scope is not a DILexicalBlockFile.
- return cast<DILocalScope>(S)->getNonLexicalBlockFileScope();
+ return DISubprogram::getRetainedNodeScope(N)->getNonLexicalBlockFileScope();
}
// Collect variable information from side table maintained by MF.
diff --git a/llvm/lib/CodeGen/BasicBlockSections.cpp b/llvm/lib/CodeGen/BasicBlockSections.cpp
index e317e1c..52e2909 100644
--- a/llvm/lib/CodeGen/BasicBlockSections.cpp
+++ b/llvm/lib/CodeGen/BasicBlockSections.cpp
@@ -183,8 +183,7 @@ updateBranches(MachineFunction &MF,
// clusters are ordered in increasing order of their IDs, with the "Exception"
// and "Cold" succeeding all other clusters.
// FuncClusterInfo represents the cluster information for basic blocks. It
-// maps from BBID of basic blocks to their cluster information. If this is
-// empty, it means unique sections for all basic blocks in the function.
+// maps from BBID of basic blocks to their cluster information.
static void
assignSections(MachineFunction &MF,
const DenseMap<UniqueBBID, BBClusterInfo> &FuncClusterInfo) {
@@ -197,10 +196,8 @@ assignSections(MachineFunction &MF,
for (auto &MBB : MF) {
// With the 'all' option, every basic block is placed in a unique section.
// With the 'list' option, every basic block is placed in a section
- // associated with its cluster, unless we want individual unique sections
- // for every basic block in this function (if FuncClusterInfo is empty).
- if (MF.getTarget().getBBSectionsType() == llvm::BasicBlockSection::All ||
- FuncClusterInfo.empty()) {
+ // associated with its cluster.
+ if (MF.getTarget().getBBSectionsType() == llvm::BasicBlockSection::All) {
// If unique sections are desired for all basic blocks of the function, we
// set every basic block's section ID equal to its original position in
// the layout (which is equal to its number). This ensures that basic
@@ -308,22 +305,22 @@ bool BasicBlockSections::handleBBSections(MachineFunction &MF) {
if (BBSectionsType == BasicBlockSection::List &&
hasInstrProfHashMismatch(MF))
return false;
- // Renumber blocks before sorting them. This is useful for accessing the
- // original layout positions and finding the original fallthroughs.
- MF.RenumberBlocks();
DenseMap<UniqueBBID, BBClusterInfo> FuncClusterInfo;
if (BBSectionsType == BasicBlockSection::List) {
- auto [HasProfile, ClusterInfo] =
- getAnalysis<BasicBlockSectionsProfileReaderWrapperPass>()
- .getClusterInfoForFunction(MF.getName());
- if (!HasProfile)
+ auto ClusterInfo = getAnalysis<BasicBlockSectionsProfileReaderWrapperPass>()
+ .getClusterInfoForFunction(MF.getName());
+ if (ClusterInfo.empty())
return false;
for (auto &BBClusterInfo : ClusterInfo) {
FuncClusterInfo.try_emplace(BBClusterInfo.BBID, BBClusterInfo);
}
}
+ // Renumber blocks before sorting them. This is useful for accessing the
+ // original layout positions and finding the original fallthroughs.
+ MF.RenumberBlocks();
+
MF.setBBSectionsType(BBSectionsType);
assignSections(MF, FuncClusterInfo);
diff --git a/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp b/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp
index 485b44ae..c234c0f 100644
--- a/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp
+++ b/llvm/lib/CodeGen/BasicBlockSectionsProfileReader.cpp
@@ -58,22 +58,24 @@ BasicBlockSectionsProfileReader::parseUniqueBBID(StringRef S) const {
}
bool BasicBlockSectionsProfileReader::isFunctionHot(StringRef FuncName) const {
- return getClusterInfoForFunction(FuncName).first;
+ return !getClusterInfoForFunction(FuncName).empty();
}
-std::pair<bool, SmallVector<BBClusterInfo>>
+SmallVector<BBClusterInfo>
BasicBlockSectionsProfileReader::getClusterInfoForFunction(
StringRef FuncName) const {
auto R = ProgramPathAndClusterInfo.find(getAliasName(FuncName));
- return R != ProgramPathAndClusterInfo.end()
- ? std::pair(true, R->second.ClusterInfo)
- : std::pair(false, SmallVector<BBClusterInfo>());
+ return R != ProgramPathAndClusterInfo.end() ? R->second.ClusterInfo
+ : SmallVector<BBClusterInfo>();
}
SmallVector<SmallVector<unsigned>>
BasicBlockSectionsProfileReader::getClonePathsForFunction(
StringRef FuncName) const {
- return ProgramPathAndClusterInfo.lookup(getAliasName(FuncName)).ClonePaths;
+ auto R = ProgramPathAndClusterInfo.find(getAliasName(FuncName));
+ return R != ProgramPathAndClusterInfo.end()
+ ? R->second.ClonePaths
+ : SmallVector<SmallVector<unsigned>>();
}
uint64_t BasicBlockSectionsProfileReader::getEdgeCount(
@@ -494,7 +496,7 @@ bool BasicBlockSectionsProfileReaderWrapperPass::isFunctionHot(
return BBSPR.isFunctionHot(FuncName);
}
-std::pair<bool, SmallVector<BBClusterInfo>>
+SmallVector<BBClusterInfo>
BasicBlockSectionsProfileReaderWrapperPass::getClusterInfoForFunction(
StringRef FuncName) const {
return BBSPR.getClusterInfoForFunction(FuncName);
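With the bool gone, "function has a profile" and "cluster list is non-empty" coincide, which is exactly what the updated BasicBlockSections.cpp checks. The caller pattern in isolation, assuming a profile-reader handle Reader:

    SmallVector<BBClusterInfo> ClusterInfo =
        Reader.getClusterInfoForFunction(MF.getName());
    if (ClusterInfo.empty())
      return false; // No usable profile for this function.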
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 0309e22..b6dd174 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1839,7 +1839,8 @@ bool CodeGenPrepare::unfoldPowerOf2Test(CmpInst *Cmp) {
/// lose; some adjustment may be wanted there.
///
/// Return true if any changes are made.
-static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
+static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI,
+ const DataLayout &DL) {
if (TLI.hasMultipleConditionRegisters(EVT::getEVT(Cmp->getType())))
return false;
@@ -1847,6 +1848,18 @@ static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
return false;
+ bool UsedInPhiOrCurrentBlock = any_of(Cmp->users(), [Cmp](User *U) {
+ return isa<PHINode>(U) ||
+ cast<Instruction>(U)->getParent() == Cmp->getParent();
+ });
+
+ // Avoid sinking integer comparisons wider than the largest legal type unless
+ // the compare is ONLY used in another BB.
+ if (UsedInPhiOrCurrentBlock && Cmp->getOperand(0)->getType()->isIntegerTy() &&
+ Cmp->getOperand(0)->getType()->getScalarSizeInBits() >
+ DL.getLargestLegalIntTypeSizeInBits())
+ return false;
+
// Only insert a cmp in each block once.
DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
@@ -2224,7 +2237,7 @@ bool CodeGenPrepare::optimizeURem(Instruction *Rem) {
}
bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
- if (sinkCmpExpression(Cmp, *TLI))
+ if (sinkCmpExpression(Cmp, *TLI, *DL))
return true;
if (combineToUAddWithOverflow(Cmp, ModifiedDT))
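Concretely, the new bail-out keeps e.g. an i128 compare on a 64-bit target from being duplicated into other blocks when it already has a PHI or same-block user, since every sunk copy would later be expanded into multi-instruction wide arithmetic. The predicate restated as a standalone sketch (the helper name is hypothetical; the logic mirrors the lines added above):

    // True when the compare is wider than any legal integer type and has a
    // user that is a PHI or lives in the compare's own block.
    static bool wideCmpShouldStayPut(const CmpInst *Cmp, const DataLayout &DL) {
      Type *OpTy = Cmp->getOperand(0)->getType();
      bool UsedInPhiOrCurrentBlock = any_of(Cmp->users(), [Cmp](const User *U) {
        return isa<PHINode>(U) ||
               cast<Instruction>(U)->getParent() == Cmp->getParent();
      });
      return UsedInPhiOrCurrentBlock && OpTy->isIntegerTy() &&
             OpTy->getScalarSizeInBits() > DL.getLargestLegalIntTypeSizeInBits();
    }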
diff --git a/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp
index a72c2c4..32b6c46 100644
--- a/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp
+++ b/llvm/lib/CodeGen/MLRegAllocEvictAdvisor.cpp
@@ -83,13 +83,6 @@ static cl::opt<std::string> ModelUnderTraining(
"regalloc-model", cl::Hidden,
cl::desc("The model being trained for register allocation eviction"));
-static cl::opt<bool> EnableDevelopmentFeatures(
- "regalloc-enable-development-features", cl::Hidden,
- cl::desc("Whether or not to enable features under development for the ML "
- "regalloc advisor"));
-
-#else
-static const bool EnableDevelopmentFeatures = false;
#endif // #ifdef LLVM_HAVE_TFLITE
/// The score injection pass.
@@ -212,23 +205,6 @@ static const std::vector<int64_t> PerLiveRangeShape{1, NumberOfInterferences};
"lowest stage of an interval in this LR") \
M(float, progress, {1}, "ratio of current queue size to initial size")
-#ifdef LLVM_HAVE_TFLITE
-#define RA_EVICT_FIRST_DEVELOPMENT_FEATURE(M) \
- M(int64_t, instructions, InstructionsShape, \
- "Opcodes of the instructions covered by the eviction problem")
-
-#define RA_EVICT_REST_DEVELOPMENT_FEATURES(M) \
- M(int64_t, instructions_mapping, InstructionsMappingShape, \
- "A binary matrix mapping LRs to instruction opcodes") \
- M(float, mbb_frequencies, MBBFrequencyShape, \
- "A vector of machine basic block frequencies") \
- M(int64_t, mbb_mapping, InstructionsShape, \
- "A vector of indices mapping instructions to MBBs")
-#else
-#define RA_EVICT_FIRST_DEVELOPMENT_FEATURE(M)
-#define RA_EVICT_REST_DEVELOPMENT_FEATURES(M)
-#endif
-
// The model learns to pick one of the mask == 1 interferences. This is the
// name of the output tensor. The contract with the model is that the output
// will be guaranteed to be to a mask == 1 position. Using a macro here to
@@ -242,12 +218,6 @@ enum FeatureIDs {
#define _FEATURE_IDX_SIMPLE(_, name, __, ___) name
#define _FEATURE_IDX(A, B, C, D) _FEATURE_IDX_SIMPLE(A, B, C, D),
RA_EVICT_FEATURES_LIST(_FEATURE_IDX) FeatureCount,
-#ifdef LLVM_HAVE_TFLITE
- RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_FEATURE_IDX_SIMPLE) = FeatureCount,
-#else
- RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_FEATURE_IDX)
-#endif // #ifdef LLVM_HAVE_TFLITE
- RA_EVICT_REST_DEVELOPMENT_FEATURES(_FEATURE_IDX) FeaturesWithDevelopmentCount
#undef _FEATURE_IDX
#undef _FEATURE_IDX_SIMPLE
};
@@ -268,11 +238,7 @@ void resetInputs(MLModelRunner &Runner) {
std::memset(Runner.getTensorUntyped(FeatureIDs::NAME), 0, \
getTotalSize<TYPE>(SHAPE));
RA_EVICT_FEATURES_LIST(_RESET)
- if (EnableDevelopmentFeatures) {
- RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_RESET)
- RA_EVICT_REST_DEVELOPMENT_FEATURES(_RESET)
#undef _RESET
- }
}
// Per-live interval components that get aggregated into the feature values
@@ -398,13 +364,7 @@ class ReleaseModeEvictionAdvisorProvider final
public:
ReleaseModeEvictionAdvisorProvider(LLVMContext &Ctx)
: RegAllocEvictionAdvisorProvider(AdvisorMode::Release, Ctx) {
- if (EnableDevelopmentFeatures) {
- InputFeatures = {RA_EVICT_FEATURES_LIST(
- _DECL_FEATURES) RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_FEATURES)
- RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_FEATURES)};
- } else {
- InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)};
- }
+ InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)};
}
// support for isa<> and dyn_cast.
static bool classof(const RegAllocEvictionAdvisorProvider *R) {
@@ -500,25 +460,12 @@ class DevelopmentModeEvictionAdvisorProvider final
public:
DevelopmentModeEvictionAdvisorProvider(LLVMContext &Ctx)
: RegAllocEvictionAdvisorProvider(AdvisorMode::Development, Ctx) {
- if (EnableDevelopmentFeatures) {
- InputFeatures = {RA_EVICT_FEATURES_LIST(
- _DECL_FEATURES) RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_FEATURES)
- RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_FEATURES)};
- TrainingInputFeatures = {
- RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES)
- RA_EVICT_FIRST_DEVELOPMENT_FEATURE(_DECL_TRAIN_FEATURES)
- RA_EVICT_REST_DEVELOPMENT_FEATURES(_DECL_TRAIN_FEATURES)
- TensorSpec::createSpec<float>("action_discount", {1}),
- TensorSpec::createSpec<int32_t>("action_step_type", {1}),
- TensorSpec::createSpec<float>("action_reward", {1})};
- } else {
- InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)};
- TrainingInputFeatures = {
- RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES)
- TensorSpec::createSpec<float>("action_discount", {1}),
- TensorSpec::createSpec<int32_t>("action_step_type", {1}),
- TensorSpec::createSpec<float>("action_reward", {1})};
- }
+ InputFeatures = {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)};
+ TrainingInputFeatures = {
+ RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES)
+ TensorSpec::createSpec<float>("action_discount", {1}),
+ TensorSpec::createSpec<int32_t>("action_step_type", {1}),
+ TensorSpec::createSpec<float>("action_reward", {1})};
if (ModelUnderTraining.empty() && TrainingLog.empty()) {
Ctx.emitError("Regalloc development mode should be requested with at "
"least logging enabled and/or a training model");
@@ -814,34 +761,6 @@ MCRegister MLEvictAdvisor::tryFindEvictionCandidate(
/*NumUrgent*/ 0.0, LRPosInfo);
assert(InitialQSize > 0.0 && "We couldn't have gotten here if we had "
"nothing to allocate initially.");
-#ifdef LLVM_HAVE_TFLITE
- if (EnableDevelopmentFeatures) {
- extractInstructionFeatures(
- LRPosInfo, Runner,
- [this](SlotIndex InputIndex) -> int {
- auto *CurrentMachineInstruction =
- LIS->getInstructionFromIndex(InputIndex);
- if (!CurrentMachineInstruction) {
- return -1;
- }
- return CurrentMachineInstruction->getOpcode();
- },
- [this](SlotIndex InputIndex) -> float {
- auto *CurrentMachineInstruction =
- LIS->getInstructionFromIndex(InputIndex);
- return MBFI.getBlockFreqRelativeToEntryBlock(
- CurrentMachineInstruction->getParent());
- },
- [this](SlotIndex InputIndex) -> MachineBasicBlock * {
- auto *CurrentMachineInstruction =
- LIS->getInstructionFromIndex(InputIndex);
- return CurrentMachineInstruction->getParent();
- },
- FeatureIDs::instructions, FeatureIDs::instructions_mapping,
- FeatureIDs::mbb_frequencies, FeatureIDs::mbb_mapping,
- LIS->getSlotIndexes()->getLastIndex());
- }
-#endif // #ifdef LLVM_HAVE_TFLITE
// Normalize the features.
for (auto &V : Largest)
V = V ? V : 1.0;
@@ -987,13 +906,6 @@ void MLEvictAdvisor::extractFeatures(
HintWeights += LIFC.HintWeights;
NumRematerializable += LIFC.IsRemat;
-
- if (EnableDevelopmentFeatures) {
- for (auto CurrentSegment : LI) {
- LRPosInfo.push_back(
- LRStartEndInfo{CurrentSegment.start, CurrentSegment.end, Pos});
- }
- }
}
size_t Size = 0;
if (!Intervals.empty()) {
@@ -1209,9 +1121,7 @@ int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition(
Log->startObservation();
size_t CurrentFeature = 0;
- size_t FeatureCount = EnableDevelopmentFeatures
- ? FeatureIDs::FeaturesWithDevelopmentCount
- : FeatureIDs::FeatureCount;
+ size_t FeatureCount = FeatureIDs::FeatureCount;
for (; CurrentFeature < FeatureCount; ++CurrentFeature) {
Log->logTensorValue(CurrentFeature,
reinterpret_cast<const char *>(
diff --git a/llvm/lib/CodeGen/RegAllocGreedy.h b/llvm/lib/CodeGen/RegAllocGreedy.h
index 7f013d1..4affa27 100644
--- a/llvm/lib/CodeGen/RegAllocGreedy.h
+++ b/llvm/lib/CodeGen/RegAllocGreedy.h
@@ -33,7 +33,6 @@
#include "llvm/CodeGen/SpillPlacement.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
-#include <algorithm>
#include <cstdint>
#include <memory>
#include <queue>
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index 99f7693..f93a7f2 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -79,9 +79,9 @@ static cl::opt<bool> EnableJoining("join-liveintervals",
cl::desc("Coalesce copies (default=true)"),
cl::init(true), cl::Hidden);
-static cl::opt<bool> UseTerminalRule("terminal-rule",
- cl::desc("Apply the terminal rule"),
- cl::init(false), cl::Hidden);
+static cl::opt<cl::boolOrDefault>
+ EnableTerminalRule("terminal-rule", cl::desc("Apply the terminal rule"),
+ cl::init(cl::BOU_UNSET), cl::Hidden);
/// Temporary flag to test critical edge unsplitting.
static cl::opt<bool> EnableJoinSplits(
@@ -134,6 +134,7 @@ class RegisterCoalescer : private LiveRangeEdit::Delegate {
SlotIndexes *SI = nullptr;
const MachineLoopInfo *Loops = nullptr;
RegisterClassInfo RegClassInfo;
+ bool UseTerminalRule = false;
/// Position and VReg of a PHI instruction during coalescing.
struct PHIValPos {
@@ -4320,6 +4321,11 @@ bool RegisterCoalescer::run(MachineFunction &fn) {
else
JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE);
+ if (EnableTerminalRule == cl::BOU_UNSET)
+ UseTerminalRule = STI.enableTerminalRule();
+ else
+ UseTerminalRule = EnableTerminalRule == cl::BOU_TRUE;
+
// If there are PHIs tracked by debug-info, they will need updating during
// coalescing. Build an index of those PHIs to ease updating.
SlotIndexes *Slots = LIS->getSlotIndexes();
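The boolOrDefault idiom gives the flag three states: the subtarget hook decides unless -terminal-rule is passed explicitly. The resolution restated as a sketch:

    // cl::BOU_UNSET: defer to the subtarget; otherwise the flag wins.
    UseTerminalRule = (EnableTerminalRule == cl::BOU_UNSET)
                          ? STI.enableTerminalRule()
                          : (EnableTerminalRule == cl::BOU_TRUE);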
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index f144f17..4f2eb1e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18863,6 +18863,26 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
if (SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
+ if (VT != N1.getValueType())
+ return SDValue();
+
+ // If this is equivalent to a disjoint or, replace it with one. This can
+ // happen if the sign operand is a sign mask (i.e., x << sign_bit_position).
+ if (DAG.SignBitIsZeroFP(N0) &&
+ DAG.computeKnownBits(N1).Zero.isMaxSignedValue()) {
+ // TODO: Just directly match the shift pattern. computeKnownBits is heavy
+ // for such a narrowly targeted case.
+ EVT IntVT = VT.changeTypeToInteger();
+ // TODO: It appears to be profitable in some situations to unconditionally
+ // emit a fabs(n0) to perform this combine.
+ SDValue CastSrc0 = DAG.getNode(ISD::BITCAST, DL, IntVT, N0);
+ SDValue CastSrc1 = DAG.getNode(ISD::BITCAST, DL, IntVT, N1);
+
+ SDValue SignOr = DAG.getNode(ISD::OR, DL, IntVT, CastSrc0, CastSrc1,
+ SDNodeFlags::Disjoint);
+ return DAG.getNode(ISD::BITCAST, DL, VT, SignOr);
+ }
+
return SDValue();
}
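Why the disjoint OR is valid, as a worked f32 example (hypothetical values):

    // x = 2.0f   -> bits 0x40000000  (sign bit known zero)
    // y = -0.0f  -> bits 0x80000000  (a pure sign mask: all other bits zero)
    // copysign(x, y) = 0x40000000 | 0x80000000 = 0xC0000000 == -2.0f
    // No bit position is set in both operands, so the OR is disjoint.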
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 316aacd..a0baf82 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -4842,9 +4842,15 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
RTLIB::Libcall LC = Node->getOpcode() == ISD::FSINCOS
? RTLIB::getSINCOS(VT)
: RTLIB::getSINCOSPI(VT);
- bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results);
- if (!Expanded)
- llvm_unreachable("Expected scalar FSINCOS[PI] to expand to libcall!");
+ bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT);
+ if (!Expanded) {
+ DAG.getContext()->emitError(Twine("no libcall available for ") +
+ Node->getOperationName(&DAG));
+ SDValue Poison = DAG.getPOISON(VT);
+ Results.push_back(Poison);
+ Results.push_back(Poison);
+ }
+
break;
}
case ISD::FLOG:
@@ -4934,7 +4940,7 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
EVT VT = Node->getValueType(0);
RTLIB::Libcall LC = Node->getOpcode() == ISD::FMODF ? RTLIB::getMODF(VT)
: RTLIB::getFREXP(VT);
- bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results,
+ bool Expanded = DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT,
/*CallRetResNo=*/0);
if (!Expanded)
llvm_unreachable("Expected scalar FFREXP/FMODF to expand to libcall!");
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index 58983cb..29c4dac 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -1726,7 +1726,8 @@ void DAGTypeLegalizer::ExpandFloatRes_UnaryWithTwoFPResults(
SDNode *N, RTLIB::Libcall LC, std::optional<unsigned> CallRetResNo) {
assert(!N->isStrictFPOpcode() && "strictfp not implemented");
SmallVector<SDValue> Results;
- DAG.expandMultipleResultFPLibCall(LC, N, Results, CallRetResNo);
+ DAG.expandMultipleResultFPLibCall(LC, N, Results, N->getValueType(0),
+ CallRetResNo);
for (auto [ResNo, Res] : enumerate(Results)) {
SDValue Lo, Hi;
GetPairElements(Res, Lo, Hi);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 94751be5..f5a54497 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -1268,20 +1268,30 @@ void VectorLegalizer::Expand(SDNode *Node, SmallVectorImpl<SDValue> &Results) {
return;
break;
- case ISD::FSINCOS:
+
case ISD::FSINCOSPI: {
+ EVT VT = Node->getValueType(0);
+ RTLIB::Libcall LC = RTLIB::getSINCOSPI(VT);
+ if (LC != RTLIB::UNKNOWN_LIBCALL &&
+ DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT))
+ return;
+
+ // TODO: Check whether a narrower call is available before scalarizing.
+ break;
+ }
+ case ISD::FSINCOS: {
+ // FIXME: Try to directly match the vector case, as done for fsincospi.
EVT VT = Node->getValueType(0).getVectorElementType();
- RTLIB::Libcall LC = Node->getOpcode() == ISD::FSINCOS
- ? RTLIB::getSINCOS(VT)
- : RTLIB::getSINCOSPI(VT);
- if (DAG.expandMultipleResultFPLibCall(LC, Node, Results))
+ RTLIB::Libcall LC = RTLIB::getSINCOS(VT);
+ if (DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT))
return;
break;
}
case ISD::FMODF: {
- RTLIB::Libcall LC =
- RTLIB::getMODF(Node->getValueType(0).getVectorElementType());
- if (DAG.expandMultipleResultFPLibCall(LC, Node, Results,
+ EVT VT = Node->getValueType(0).getVectorElementType();
+ RTLIB::Libcall LC = RTLIB::getMODF(VT);
+ if (DAG.expandMultipleResultFPLibCall(LC, Node, Results, VT,
/*CallRetResNo=*/0))
return;
break;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 80bbfea..b5d502b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2514,18 +2514,20 @@ static bool canFoldStoreIntoLibCallOutputPointers(StoreSDNode *StoreNode,
bool SelectionDAG::expandMultipleResultFPLibCall(
RTLIB::Libcall LC, SDNode *Node, SmallVectorImpl<SDValue> &Results,
- std::optional<unsigned> CallRetResNo) {
- LLVMContext &Ctx = *getContext();
- EVT VT = Node->getValueType(0);
- unsigned NumResults = Node->getNumValues();
-
+ EVT CallVT, std::optional<unsigned> CallRetResNo) {
if (LC == RTLIB::UNKNOWN_LIBCALL)
return false;
- const char *LCName = TLI->getLibcallName(LC);
- if (!LCName)
+ EVT VT = Node->getValueType(0);
+
+ RTLIB::LibcallImpl Impl = TLI->getLibcallImpl(LC);
+ if (Impl == RTLIB::Unsupported)
return false;
+ StringRef LCName = TLI->getLibcallImplName(Impl);
+
+ // FIXME: This should not use TargetLibraryInfo. There should be
+ // RTLIB::Libcall entries for each used vector type, matched directly.
auto getVecDesc = [&]() -> VecDesc const * {
for (bool Masked : {false, true}) {
if (VecDesc const *VD = getLibInfo().getVectorMappingInfo(
@@ -2538,9 +2540,34 @@ bool SelectionDAG::expandMultipleResultFPLibCall(
// For vector types, we must find a vector mapping for the libcall.
VecDesc const *VD = nullptr;
- if (VT.isVector() && !(VD = getVecDesc()))
+ if (VT.isVector() && !CallVT.isVector() && !(VD = getVecDesc()))
return false;
+ bool IsMasked = (VD && VD->isMasked()) ||
+ RTLIB::RuntimeLibcallsInfo::hasVectorMaskArgument(Impl);
+
+  // The name-based overload below exists because getVectorMappingInfo works
+  // in terms of function names instead of RTLIB enums.
+
+ // FIXME: If we used a vector mapping, this assumes the calling convention of
+ // the vector function is the same as the scalar.
+
+ StringRef Name = VD ? VD->getVectorFnName() : LCName;
+
+ return expandMultipleResultFPLibCall(Name,
+ TLI->getLibcallImplCallingConv(Impl),
+ Node, Results, CallRetResNo, IsMasked);
+}
+
+// FIXME: This belongs in TargetLowering
+bool SelectionDAG::expandMultipleResultFPLibCall(
+ StringRef Name, CallingConv::ID CC, SDNode *Node,
+ SmallVectorImpl<SDValue> &Results, std::optional<unsigned> CallRetResNo,
+ bool IsMasked) {
+ LLVMContext &Ctx = *getContext();
+ EVT VT = Node->getValueType(0);
+ unsigned NumResults = Node->getNumValues();
+
// Find users of the node that store the results (and share input chains). The
// destination pointers can be used instead of creating stack allocations.
SDValue StoresInChain;
@@ -2598,7 +2625,7 @@ bool SelectionDAG::expandMultipleResultFPLibCall(
SDLoc DL(Node);
// Pass the vector mask (if required).
- if (VD && VD->isMasked()) {
+ if (IsMasked) {
EVT MaskVT = TLI->getSetCCResultType(getDataLayout(), Ctx, VT);
SDValue Mask = getBoolConstant(true, DL, MaskVT, VT);
Args.emplace_back(Mask, MaskVT.getTypeForEVT(Ctx));
@@ -2608,11 +2635,11 @@ bool SelectionDAG::expandMultipleResultFPLibCall(
? Node->getValueType(*CallRetResNo).getTypeForEVT(Ctx)
: Type::getVoidTy(Ctx);
SDValue InChain = StoresInChain ? StoresInChain : getEntryNode();
- SDValue Callee = getExternalSymbol(VD ? VD->getVectorFnName().data() : LCName,
- TLI->getPointerTy(getDataLayout()));
+ SDValue Callee =
+ getExternalSymbol(Name.data(), TLI->getPointerTy(getDataLayout()));
TargetLowering::CallLoweringInfo CLI(*this);
- CLI.setDebugLoc(DL).setChain(InChain).setLibCallee(
- TLI->getLibcallCallingConv(LC), RetType, Callee, std::move(Args));
+ CLI.setDebugLoc(DL).setChain(InChain).setLibCallee(CC, RetType, Callee,
+ std::move(Args));
auto [Call, CallChain] = TLI->LowerCallTo(CLI);
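Splitting the function leaves an enum-based resolver on top of a name-based worker, so a caller that already knows the symbol and calling convention can skip the RTLIB lookup. An illustrative use of the name-based overload (the symbol and convention below are taken from the ArmPL entries added later in this patch; the surrounding code is a sketch):

    SmallVector<SDValue> Results;
    bool Lowered = DAG.expandMultipleResultFPLibCall(
        "armpl_vsincosq_f32", CallingConv::AArch64_VectorCall, Node, Results,
        /*CallRetResNo=*/std::nullopt, /*IsMasked=*/false);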
@@ -2920,6 +2947,34 @@ bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
return MaskedValueIsZero(Op, APInt::getSignMask(BitWidth), Depth);
}
+bool SelectionDAG::SignBitIsZeroFP(SDValue Op, unsigned Depth) const {
+ if (Depth >= MaxRecursionDepth)
+ return false; // Limit search depth.
+
+ unsigned Opc = Op.getOpcode();
+ switch (Opc) {
+ case ISD::FABS:
+ return true;
+ case ISD::AssertNoFPClass: {
+ FPClassTest NoFPClass =
+ static_cast<FPClassTest>(Op.getConstantOperandVal(1));
+
+ const FPClassTest TestMask = fcNan | fcNegative;
+ return (NoFPClass & TestMask) == TestMask;
+ }
+ case ISD::ARITH_FENCE:
+    return SignBitIsZeroFP(Op.getOperand(0), Depth + 1);
+ case ISD::FEXP:
+ case ISD::FEXP2:
+ case ISD::FEXP10:
+ return Op->getFlags().hasNoNaNs();
+ default:
+ return false;
+ }
+
+ llvm_unreachable("covered opcode switch");
+}
+
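SignBitIsZeroFP reasons about the raw IEEE sign bit: it is known clear only when the value can be neither NaN (whose sign bit is unspecified) nor any negative class, including -0.0 and -inf. A worked example under those rules (names and the annotated call are assumptions for illustration):

    // R carries AssertNoFPClass excluding (fcNan | fcNegative), e.g. because
    // the callee was declared nofpclass(nan ninf nsub nnorm nzero).
    bool Clear = DAG.SignBitIsZeroFP(R); // true: no negative class, no NaN
    // FEXP/FEXP2/FEXP10 need only nnan: exp(x) is otherwise >= +0.0.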
/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
/// this predicate to simplify operations downstream. Mask is known to be zero
/// for bits that V cannot have.
@@ -4121,6 +4176,25 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
Known.One.clearLowBits(LogOfAlign);
break;
}
+ case ISD::AssertNoFPClass: {
+ Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+
+ FPClassTest NoFPClass =
+ static_cast<FPClassTest>(Op.getConstantOperandVal(1));
+ const FPClassTest NegativeTestMask = fcNan | fcNegative;
+ if ((NoFPClass & NegativeTestMask) == NegativeTestMask) {
+ // Cannot be negative.
+ Known.makeNonNegative();
+ }
+
+ const FPClassTest PositiveTestMask = fcNan | fcPositive;
+ if ((NoFPClass & PositiveTestMask) == PositiveTestMask) {
+ // Cannot be positive.
+ Known.makeNegative();
+ }
+
+ break;
+ }
case ISD::FGETSIGN:
// All bits are zero except the low bit.
Known.Zero.setBitsFrom(1);
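Because known-bits analysis works on raw bit patterns, excluding (fcNan | fcNegative) pins the sign bit of every lane to 0, and excluding (fcNan | fcPositive) pins it to 1; NaN must be excluded in both directions since its sign is unconstrained. A sketch of the observable effect (the value name is illustrative):

    KnownBits Known = DAG.computeKnownBits(AssertedVal); // AssertNoFPClass node
    if (Known.isNonNegative())
      ; // the sign bit of each element is provably zero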
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 88b0809..6a9022d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4638,6 +4638,12 @@ static std::optional<ConstantRange> getRange(const Instruction &I) {
return std::nullopt;
}
+static FPClassTest getNoFPClass(const Instruction &I) {
+ if (const auto *CB = dyn_cast<CallBase>(&I))
+ return CB->getRetNoFPClass();
+ return fcNone;
+}
+
void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
if (I.isAtomic())
return visitAtomicLoad(I);
@@ -9132,6 +9138,7 @@ void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
if (Result.first.getNode()) {
Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
+ Result.first = lowerNoFPClassToAssertNoFPClass(DAG, CB, Result.first);
setValue(&CB, Result.first);
}
@@ -10718,6 +10725,16 @@ SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
return DAG.getMergeValues(Ops, SL);
}
+SDValue SelectionDAGBuilder::lowerNoFPClassToAssertNoFPClass(
+ SelectionDAG &DAG, const Instruction &I, SDValue Op) {
+ FPClassTest Classes = getNoFPClass(I);
+ if (Classes == fcNone)
+ return Op;
+
+ return DAG.getNode(ISD::AssertNoFPClass, SDLoc(Op), Op.getValueType(), Op,
+ DAG.getTargetConstant(Classes, SDLoc(), MVT::i32));
+}
+
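The builder hook translates the IR-level return attribute into the DAG assertion node, so the known-bits and sign-bit queries above can consume it. A sketch of the correspondence:

    // IR:  %v = call nofpclass(nan inf) float @f()
    // DAG: t1: f32 = <lowered call result>
    //      t2: f32 = AssertNoFPClass t1, TargetConstant:i32<fcNan | fcInf>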
/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
/// the call being lowered.
///
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
index ed63bee..13e2daa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -429,6 +429,10 @@ public:
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
SDValue Op);
+ // Lower nofpclass attributes to AssertNoFPClass
+ SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG,
+ const Instruction &I, SDValue Op);
+
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
const CallBase *Call, unsigned ArgIdx,
unsigned NumArgs, SDValue Callee,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 8bc5d2f..e78dfb1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2448,7 +2448,7 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
// a cycle in the scheduling graph.
// If the node has glue, walk down the graph to the "lowest" node in the
- // glueged set.
+ // glued set.
EVT VT = Root->getValueType(Root->getNumValues()-1);
while (VT == MVT::Glue) {
SDNode *GU = Root->getGluedUser();
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 3c41bbe..7c89e51 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -60,7 +60,7 @@ TargetInstrInfo::~TargetInstrInfo() = default;
const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
- const TargetRegisterInfo *TRI) const {
+ const TargetRegisterInfo * /*RemoveMe*/) const {
if (OpNum >= MCID.getNumOperands())
return nullptr;
@@ -69,14 +69,14 @@ TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
// TODO: Remove isLookupPtrRegClass in favor of isLookupRegClassByHwMode
if (OpInfo.isLookupPtrRegClass())
- return TRI->getPointerRegClass(RegClass);
+ return TRI.getPointerRegClass(RegClass);
// Instructions like INSERT_SUBREG do not have fixed register classes.
if (RegClass < 0)
return nullptr;
// Otherwise just look it up normally.
- return TRI->getRegClass(RegClass);
+ return TRI.getRegClass(RegClass);
}
/// insertNoop - Insert a noop into the instruction stream at the specified
@@ -223,13 +223,11 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
// %1.sub = INST %1.sub(tied), %0.sub, implicit-def %1
SmallVector<unsigned> UpdateImplicitDefIdx;
if (HasDef && MI.hasImplicitDef()) {
- const TargetRegisterInfo *TRI =
- MI.getMF()->getSubtarget().getRegisterInfo();
for (auto [OpNo, MO] : llvm::enumerate(MI.implicit_operands())) {
Register ImplReg = MO.getReg();
if ((ImplReg.isVirtual() && ImplReg == Reg0) ||
(ImplReg.isPhysical() && Reg0.isPhysical() &&
- TRI->isSubRegisterEq(ImplReg, Reg0)))
+ TRI.isSubRegisterEq(ImplReg, Reg0)))
UpdateImplicitDefIdx.push_back(OpNo + MI.getNumExplicitOperands());
}
}
@@ -425,37 +423,35 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
unsigned SubIdx, unsigned &Size,
unsigned &Offset,
const MachineFunction &MF) const {
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!SubIdx) {
- Size = TRI->getSpillSize(*RC);
+ Size = TRI.getSpillSize(*RC);
Offset = 0;
return true;
}
- unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
+ unsigned BitSize = TRI.getSubRegIdxSize(SubIdx);
// Convert bit size to byte size.
if (BitSize % 8)
return false;
- int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
+ int BitOffset = TRI.getSubRegIdxOffset(SubIdx);
if (BitOffset < 0 || BitOffset % 8)
return false;
Size = BitSize / 8;
Offset = (unsigned)BitOffset / 8;
- assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
+ assert(TRI.getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
if (!MF.getDataLayout().isLittleEndian()) {
- Offset = TRI->getSpillSize(*RC) - (Offset + Size);
+ Offset = TRI.getSpillSize(*RC) - (Offset + Size);
}
return true;
}
-void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- Register DestReg, unsigned SubIdx,
- const MachineInstr &Orig,
- const TargetRegisterInfo &TRI) const {
+void TargetInstrInfo::reMaterialize(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
+ unsigned SubIdx, const MachineInstr &Orig,
+    const TargetRegisterInfo & /*RemoveMe*/) const {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
MBB.insert(I, MI);
@@ -726,7 +722,6 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
// actual load size is.
int64_t MemSize = 0;
const MachineFrameInfo &MFI = MF.getFrameInfo();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (Flags & MachineMemOperand::MOStore) {
MemSize = MFI.getObjectSize(FI);
@@ -735,7 +730,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
int64_t OpSize = MFI.getObjectSize(FI);
if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
- unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
+ unsigned SubRegSize = TRI.getSubRegIdxSize(SubReg);
if (SubRegSize > 0 && !(SubRegSize % 8))
OpSize = SubRegSize / 8;
}
@@ -800,11 +795,11 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
// code.
BuildMI(*MBB, Pos, MI.getDebugLoc(), get(TargetOpcode::KILL)).add(MO);
} else {
- storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
+ storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, &TRI,
Register());
}
} else
- loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
+ loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, &TRI, Register());
return &*--Pos;
}
@@ -880,8 +875,8 @@ static void transferImplicitOperands(MachineInstr *MI,
}
}
-void TargetInstrInfo::lowerCopy(MachineInstr *MI,
- const TargetRegisterInfo *TRI) const {
+void TargetInstrInfo::lowerCopy(
+    MachineInstr *MI, const TargetRegisterInfo * /*RemoveMe*/) const {
if (MI->allDefsAreDead()) {
MI->setDesc(get(TargetOpcode::KILL));
return;
@@ -911,7 +906,7 @@ void TargetInstrInfo::lowerCopy(MachineInstr *MI,
SrcMO.getReg().isPhysical() ? SrcMO.isRenamable() : false);
if (MI->getNumOperands() > 2)
- transferImplicitOperands(MI, TRI);
+ transferImplicitOperands(MI, &TRI);
MI->eraseFromParent();
}
@@ -1327,8 +1322,7 @@ void TargetInstrInfo::reassociateOps(
MachineFunction *MF = Root.getMF();
MachineRegisterInfo &MRI = MF->getRegInfo();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
- const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
- const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);
+ const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, &TRI);
MachineOperand &OpA = Prev.getOperand(OperandIndices[1]);
MachineOperand &OpB = Root.getOperand(OperandIndices[2]);
@@ -1704,8 +1698,7 @@ bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
// stack slot reference to depend on the instruction that does the
// modification.
const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
- return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
+ return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), &TRI);
}
// Provide a global flag for disabling the PreRA hazard recognizer that targets
@@ -1738,11 +1731,11 @@ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
- bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
+ bool &OffsetIsScalable, const TargetRegisterInfo * /*RemoveMe*/) const {
SmallVector<const MachineOperand *, 4> BaseOps;
LocationSize Width = LocationSize::precise(0);
if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
- Width, TRI) ||
+ Width, &TRI) ||
BaseOps.size() != 1)
return false;
BaseOp = BaseOps.front();
@@ -1863,7 +1856,6 @@ std::optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
Register Reg) const {
const MachineFunction *MF = MI.getMF();
- const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
int64_t Offset;
bool OffsetIsScalable;
@@ -1894,7 +1886,6 @@ TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
// Only describe memory which provably does not escape the function. As
// described in llvm.org/PR43343, escaped memory may be clobbered by the
// callee (or by another thread).
- const auto &TII = MF->getSubtarget().getInstrInfo();
const MachineFrameInfo &MFI = MF->getFrameInfo();
const MachineMemOperand *MMO = MI.memoperands()[0];
const PseudoSourceValue *PSV = MMO->getPseudoValue();
@@ -1905,8 +1896,7 @@ TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
return std::nullopt;
const MachineOperand *BaseOp;
- if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
- TRI))
+ if (!getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable, &TRI))
return std::nullopt;
// FIXME: Scalable offsets are not yet handled in the offset code below.
@@ -2045,7 +2035,7 @@ bool TargetInstrInfo::getInsertSubregInputs(
// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
- const TargetRegisterInfo *TRI) const {
+ const TargetRegisterInfo * /*RemoveMe*/) const {
if (!MI.isInlineAsm())
return "";
@@ -2078,12 +2068,8 @@ std::string TargetInstrInfo::createMIROperandComment(
OS << F.getKindName();
unsigned RCID;
- if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
- if (TRI) {
- OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
- } else
- OS << ":RC" << RCID;
- }
+ if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID))
+ OS << ':' << TRI.getRegClassName(TRI.getRegClass(RCID));
if (F.isMemKind()) {
InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 1cc591c..814b4b5 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -430,6 +430,24 @@ RTLIB::Libcall RTLIB::getSINCOS(EVT RetVT) {
}
RTLIB::Libcall RTLIB::getSINCOSPI(EVT RetVT) {
+ // TODO: Tablegen should generate this function
+ if (RetVT.isVector()) {
+ if (!RetVT.isSimple())
+ return RTLIB::UNKNOWN_LIBCALL;
+ switch (RetVT.getSimpleVT().SimpleTy) {
+ case MVT::v4f32:
+ return RTLIB::SINCOSPI_V4F32;
+ case MVT::v2f64:
+ return RTLIB::SINCOSPI_V2F64;
+ case MVT::nxv4f32:
+ return RTLIB::SINCOSPI_NXV4F32;
+ case MVT::nxv2f64:
+ return RTLIB::SINCOSPI_NXV2F64;
+ default:
+ return RTLIB::UNKNOWN_LIBCALL;
+ }
+ }
+
return getFPLibCall(RetVT, SINCOSPI_F32, SINCOSPI_F64, SINCOSPI_F80,
SINCOSPI_F128, SINCOSPI_PPCF128);
}
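Usage sketch for the new vector mappings; anything outside the four listed types still resolves to UNKNOWN_LIBCALL:

    RTLIB::Libcall LC = RTLIB::getSINCOSPI(MVT::nxv4f32);
    // LC == RTLIB::SINCOSPI_NXV4F32; e.g. MVT::v8f16 would yield
    // RTLIB::UNKNOWN_LIBCALL and the caller must scalarize.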
diff --git a/llvm/lib/CodeGen/TargetSchedule.cpp b/llvm/lib/CodeGen/TargetSchedule.cpp
index 7ae9e0e..cd951a1 100644
--- a/llvm/lib/CodeGen/TargetSchedule.cpp
+++ b/llvm/lib/CodeGen/TargetSchedule.cpp
@@ -25,7 +25,6 @@
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
-#include <numeric>
using namespace llvm;
diff --git a/llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp b/llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp
index a88f4a5..a4bdd1f 100644
--- a/llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp
+++ b/llvm/lib/DebugInfo/DWARF/DWARFUnwindTablePrinter.cpp
@@ -15,7 +15,6 @@
#include <cassert>
#include <cinttypes>
#include <cstdint>
-#include <optional>
using namespace llvm;
using namespace dwarf;
diff --git a/llvm/lib/Frontend/Driver/CodeGenOptions.cpp b/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
index b546e81..4e16027 100644
--- a/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
+++ b/llvm/lib/Frontend/Driver/CodeGenOptions.cpp
@@ -8,6 +8,7 @@
#include "llvm/Frontend/Driver/CodeGenOptions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/SystemLibraries.h"
#include "llvm/ProfileData/InstrProfCorrelator.h"
#include "llvm/TargetParser/Triple.h"
@@ -25,35 +26,35 @@ TargetLibraryInfoImpl *createTLII(const llvm::Triple &TargetTriple,
using VectorLibrary = llvm::driver::VectorLibrary;
switch (Veclib) {
case VectorLibrary::Accelerate:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate,
+ TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::Accelerate,
TargetTriple);
break;
case VectorLibrary::LIBMVEC:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::LIBMVEC,
+ TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::LIBMVEC,
TargetTriple);
break;
case VectorLibrary::MASSV:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::MASSV,
+ TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::MASSV,
TargetTriple);
break;
case VectorLibrary::SVML:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML,
+ TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::SVML,
TargetTriple);
break;
case VectorLibrary::SLEEF:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SLEEFGNUABI,
+ TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::SLEEFGNUABI,
TargetTriple);
break;
case VectorLibrary::Darwin_libsystem_m:
TLII->addVectorizableFunctionsFromVecLib(
- TargetLibraryInfoImpl::DarwinLibSystemM, TargetTriple);
+ llvm::VectorLibrary::DarwinLibSystemM, TargetTriple);
break;
case VectorLibrary::ArmPL:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::ArmPL,
+ TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::ArmPL,
TargetTriple);
break;
case VectorLibrary::AMDLIBM:
- TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::AMDLIBM,
+ TLII->addVectorizableFunctionsFromVecLib(llvm::VectorLibrary::AMDLIBM,
TargetTriple);
break;
default:
diff --git a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp
index c4aa2c7..45818de 100644
--- a/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp
+++ b/llvm/lib/Frontend/Offloading/OffloadWrapper.cpp
@@ -29,7 +29,6 @@
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <memory>
-#include <string>
#include <utility>
using namespace llvm;
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index fff9a81..18a4f0a 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -530,7 +530,13 @@ void OpenMPIRBuilder::getKernelArgsVector(TargetKernelArgs &KernelArgs,
auto Int32Ty = Type::getInt32Ty(Builder.getContext());
constexpr size_t MaxDim = 3;
Value *ZeroArray = Constant::getNullValue(ArrayType::get(Int32Ty, MaxDim));
- Value *Flags = Builder.getInt64(KernelArgs.HasNoWait);
+
+ Value *HasNoWaitFlag = Builder.getInt64(KernelArgs.HasNoWait);
+
+ Value *DynCGroupMemFallbackFlag =
+ Builder.getInt64(static_cast<uint64_t>(KernelArgs.DynCGroupMemFallback));
+ DynCGroupMemFallbackFlag = Builder.CreateShl(DynCGroupMemFallbackFlag, 2);
+ Value *Flags = Builder.CreateOr(HasNoWaitFlag, DynCGroupMemFallbackFlag);
assert(!KernelArgs.NumTeams.empty() && !KernelArgs.NumThreads.empty());
@@ -559,7 +565,7 @@ void OpenMPIRBuilder::getKernelArgsVector(TargetKernelArgs &KernelArgs,
Flags,
NumTeams3D,
NumThreads3D,
- KernelArgs.DynCGGroupMem};
+ KernelArgs.DynCGroupMem};
}
void OpenMPIRBuilder::addAttributes(omp::RuntimeFunction FnID, Function &Fn) {
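The flags word now packs two fields instead of one. The layout implied by the shift above (the device runtime must decode it the same way):

    // bit 0    : KernelArgs.HasNoWait
    // bit 1    : currently unused
    // bits 2.. : DynCGroupMemFallback, an OMPDynGroupprivateFallbackType value
    uint64_t Flags = uint64_t(HasNoWait) | (uint64_t(Fallback) << 2);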
@@ -8224,7 +8230,8 @@ static void emitTargetCall(
OpenMPIRBuilder::GenMapInfoCallbackTy GenMapInfoCB,
OpenMPIRBuilder::CustomMapperCallbackTy CustomMapperCB,
const SmallVector<llvm::OpenMPIRBuilder::DependData> &Dependencies,
- bool HasNoWait) {
+ bool HasNoWait, Value *DynCGroupMem,
+ OMPDynGroupprivateFallbackType DynCGroupMemFallback) {
// Generate a function call to the host fallback implementation of the target
// region. This is called by the host when no offload entry was generated for
// the target region and when the offloading call fails at runtime.
@@ -8360,12 +8367,13 @@ static void emitTargetCall(
/*isSigned=*/false)
: Builder.getInt64(0);
- // TODO: Use correct DynCGGroupMem
- Value *DynCGGroupMem = Builder.getInt32(0);
+ // Request zero groupprivate bytes by default.
+ if (!DynCGroupMem)
+ DynCGroupMem = Builder.getInt32(0);
- KArgs = OpenMPIRBuilder::TargetKernelArgs(NumTargetItems, RTArgs, TripCount,
- NumTeamsC, NumThreadsC,
- DynCGGroupMem, HasNoWait);
+ KArgs = OpenMPIRBuilder::TargetKernelArgs(
+ NumTargetItems, RTArgs, TripCount, NumTeamsC, NumThreadsC, DynCGroupMem,
+ HasNoWait, DynCGroupMemFallback);
// Assume no error was returned because TaskBodyCB and
// EmitTargetCallFallbackCB don't produce any.
@@ -8414,7 +8422,8 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTarget(
OpenMPIRBuilder::TargetBodyGenCallbackTy CBFunc,
OpenMPIRBuilder::TargetGenArgAccessorsCallbackTy ArgAccessorFuncCB,
CustomMapperCallbackTy CustomMapperCB,
- const SmallVector<DependData> &Dependencies, bool HasNowait) {
+ const SmallVector<DependData> &Dependencies, bool HasNowait,
+ Value *DynCGroupMem, OMPDynGroupprivateFallbackType DynCGroupMemFallback) {
if (!updateToLocation(Loc))
return InsertPointTy();
@@ -8437,7 +8446,8 @@ OpenMPIRBuilder::InsertPointOrErrorTy OpenMPIRBuilder::createTarget(
if (!Config.isTargetDevice())
emitTargetCall(*this, Builder, AllocaIP, Info, DefaultAttrs, RuntimeAttrs,
IfCond, OutlinedFn, OutlinedFnID, Inputs, GenMapInfoCB,
- CustomMapperCB, Dependencies, HasNowait);
+ CustomMapperCB, Dependencies, HasNowait, DynCGroupMem,
+ DynCGroupMemFallback);
return Builder.saveIP();
}
@@ -8460,9 +8470,8 @@ OpenMPIRBuilder::createPlatformSpecificName(ArrayRef<StringRef> Parts) const {
Config.separator());
}
-GlobalVariable *
-OpenMPIRBuilder::getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
- unsigned AddressSpace) {
+GlobalVariable *OpenMPIRBuilder::getOrCreateInternalVariable(
+ Type *Ty, const StringRef &Name, std::optional<unsigned> AddressSpace) {
auto &Elem = *InternalVars.try_emplace(Name, nullptr).first;
if (Elem.second) {
assert(Elem.second->getValueType() == Ty &&
@@ -8472,16 +8481,18 @@ OpenMPIRBuilder::getOrCreateInternalVariable(Type *Ty, const StringRef &Name,
// variable for possibly changing that to internal or private, or maybe
// create different versions of the function for different OMP internal
// variables.
+ const DataLayout &DL = M.getDataLayout();
+ unsigned AddressSpaceVal =
+ AddressSpace ? *AddressSpace : DL.getDefaultGlobalsAddressSpace();
auto Linkage = this->M.getTargetTriple().getArch() == Triple::wasm32
? GlobalValue::InternalLinkage
: GlobalValue::CommonLinkage;
auto *GV = new GlobalVariable(M, Ty, /*IsConstant=*/false, Linkage,
Constant::getNullValue(Ty), Elem.first(),
/*InsertBefore=*/nullptr,
- GlobalValue::NotThreadLocal, AddressSpace);
- const DataLayout &DL = M.getDataLayout();
+ GlobalValue::NotThreadLocal, AddressSpaceVal);
const llvm::Align TypeAlign = DL.getABITypeAlign(Ty);
- const llvm::Align PtrAlign = DL.getPointerABIAlignment(AddressSpace);
+ const llvm::Align PtrAlign = DL.getPointerABIAlignment(AddressSpaceVal);
GV->setAlignment(std::max(TypeAlign, PtrAlign));
Elem.second = GV;
}
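With the address space now optional, callers with no specific requirement inherit the data layout's default globals address space. A minimal sketch (the builder and variable names are illustrative, and it assumes the header defaults the new parameter to std::nullopt):

    // Unchanged behavior when a space is given explicitly:
    GlobalVariable *GV1 = OMPBuilder.getOrCreateInternalVariable(
        Int8Ty, ".var", /*AddressSpace=*/3);
    // New: defaults to M.getDataLayout().getDefaultGlobalsAddressSpace():
    GlobalVariable *GV2 = OMPBuilder.getOrCreateInternalVariable(Int8Ty, ".var2");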
diff --git a/llvm/lib/IR/CMakeLists.txt b/llvm/lib/IR/CMakeLists.txt
index 10572ff..ebdc2ca 100644
--- a/llvm/lib/IR/CMakeLists.txt
+++ b/llvm/lib/IR/CMakeLists.txt
@@ -67,6 +67,7 @@ add_llvm_component_library(LLVMCore
ReplaceConstant.cpp
Statepoint.cpp
StructuralHash.cpp
+ SystemLibraries.cpp
Type.cpp
TypedPointerType.cpp
TypeFinder.cpp
diff --git a/llvm/lib/IR/DebugInfo.cpp b/llvm/lib/IR/DebugInfo.cpp
index 5883606..4c5fb74 100644
--- a/llvm/lib/IR/DebugInfo.cpp
+++ b/llvm/lib/IR/DebugInfo.cpp
@@ -247,7 +247,7 @@ void DebugInfoFinder::processType(DIType *DT) {
}
}
-void DebugInfoFinder::processImportedEntity(DIImportedEntity *Import) {
+void DebugInfoFinder::processImportedEntity(const DIImportedEntity *Import) {
auto *Entity = Import->getEntity();
if (auto *T = dyn_cast<DIType>(Entity))
processType(T);
@@ -307,15 +307,13 @@ void DebugInfoFinder::processSubprogram(DISubprogram *SP) {
}
}
- for (auto *N : SP->getRetainedNodes()) {
- if (auto *Var = dyn_cast_or_null<DILocalVariable>(N))
- processVariable(Var);
- else if (auto *Import = dyn_cast_or_null<DIImportedEntity>(N))
- processImportedEntity(Import);
- }
+ SP->forEachRetainedNode(
+ [this](const DILocalVariable *LV) { processVariable(LV); },
+ [](const DILabel *L) {},
+ [this](const DIImportedEntity *IE) { processImportedEntity(IE); });
}
-void DebugInfoFinder::processVariable(DILocalVariable *DV) {
+void DebugInfoFinder::processVariable(const DILocalVariable *DV) {
if (!NodesSeen.insert(DV).second)
return;
processScope(DV->getScope());
diff --git a/llvm/lib/IR/DebugInfoMetadata.cpp b/llvm/lib/IR/DebugInfoMetadata.cpp
index a98e925..1a6a25e 100644
--- a/llvm/lib/IR/DebugInfoMetadata.cpp
+++ b/llvm/lib/IR/DebugInfoMetadata.cpp
@@ -1441,6 +1441,19 @@ bool DISubprogram::describes(const Function *F) const {
assert(F && "Invalid function");
return F->getSubprogram() == this;
}
+
+const DIScope *DISubprogram::getRawRetainedNodeScope(const MDNode *N) {
+ return visitRetainedNode<DIScope *>(
+ N, [](const DILocalVariable *LV) { return LV->getScope(); },
+ [](const DILabel *L) { return L->getScope(); },
+ [](const DIImportedEntity *IE) { return IE->getScope(); },
+ [](const Metadata *N) { return nullptr; });
+}
+
+const DILocalScope *DISubprogram::getRetainedNodeScope(const MDNode *N) {
+ return cast<DILocalScope>(getRawRetainedNodeScope(N));
+}
+
DILexicalBlockBase::DILexicalBlockBase(LLVMContext &C, unsigned ID,
StorageType Storage,
ArrayRef<Metadata *> Ops)
diff --git a/llvm/lib/IR/RuntimeLibcalls.cpp b/llvm/lib/IR/RuntimeLibcalls.cpp
index f4c5c6f..e66b9ad 100644
--- a/llvm/lib/IR/RuntimeLibcalls.cpp
+++ b/llvm/lib/IR/RuntimeLibcalls.cpp
@@ -10,6 +10,7 @@
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringTable.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/SystemLibraries.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/xxhash.h"
#include "llvm/TargetParser/ARMTargetParser.h"
@@ -25,6 +26,49 @@ using namespace RTLIB;
#define DEFINE_GET_LOOKUP_LIBCALL_IMPL_NAME
#include "llvm/IR/RuntimeLibcalls.inc"
+RuntimeLibcallsInfo::RuntimeLibcallsInfo(const Triple &TT,
+ ExceptionHandling ExceptionModel,
+ FloatABI::ABIType FloatABI,
+ EABI EABIVersion, StringRef ABIName) {
+ // FIXME: The ExceptionModel parameter is to handle the field in
+ // TargetOptions. This interface fails to distinguish the forced disable
+ // case for targets which support exceptions by default. This should
+ // probably be a module flag and removed from TargetOptions.
+ if (ExceptionModel == ExceptionHandling::None)
+ ExceptionModel = TT.getDefaultExceptionHandling();
+
+ initLibcalls(TT, ExceptionModel, FloatABI, EABIVersion, ABIName);
+
+ // TODO: Tablegen should generate these sets
+ switch (ClVectorLibrary) {
+ case VectorLibrary::SLEEFGNUABI:
+ for (RTLIB::LibcallImpl Impl :
+ {RTLIB::impl__ZGVnN2vl8l8_sincos, RTLIB::impl__ZGVnN4vl4l4_sincosf,
+ RTLIB::impl__ZGVsNxvl8l8_sincos, RTLIB::impl__ZGVsNxvl4l4_sincosf,
+ RTLIB::impl__ZGVnN4vl4l4_sincospif, RTLIB::impl__ZGVnN2vl8l8_sincospi,
+ RTLIB::impl__ZGVsNxvl4l4_sincospif,
+ RTLIB::impl__ZGVsNxvl8l8_sincospi})
+ setAvailable(Impl);
+ break;
+ case VectorLibrary::ArmPL:
+ for (RTLIB::LibcallImpl Impl :
+ {RTLIB::impl_armpl_vsincosq_f64, RTLIB::impl_armpl_vsincosq_f32,
+ RTLIB::impl_armpl_svsincos_f64_x, RTLIB::impl_armpl_svsincos_f32_x,
+ RTLIB::impl_armpl_vsincospiq_f32, RTLIB::impl_armpl_vsincospiq_f64,
+ RTLIB::impl_armpl_svsincospi_f32_x,
+ RTLIB::impl_armpl_svsincospi_f64_x})
+ setAvailable(Impl);
+
+ for (RTLIB::LibcallImpl Impl :
+ {RTLIB::impl_armpl_vsincosq_f64, RTLIB::impl_armpl_vsincosq_f32})
+ setLibcallImplCallingConv(Impl, CallingConv::AArch64_VectorCall);
+
+ break;
+ default:
+ break;
+ }
+}
+
RuntimeLibcallsInfo::RuntimeLibcallsInfo(const Module &M)
: RuntimeLibcallsInfo(M.getTargetTriple()) {
// TODO: Consider module flags
@@ -88,6 +132,8 @@ RuntimeLibcallsInfo::getFunctionTy(LLVMContext &Ctx, const Triple &TT,
static constexpr Attribute::AttrKind CommonFnAttrs[] = {
Attribute::NoCallback, Attribute::NoFree, Attribute::NoSync,
Attribute::NoUnwind, Attribute::WillReturn};
+ static constexpr Attribute::AttrKind CommonPtrArgAttrs[] = {
+ Attribute::NoAlias, Attribute::WriteOnly, Attribute::NonNull};
switch (LibcallImpl) {
case RTLIB::impl___sincos_stret:
@@ -151,9 +197,86 @@ RuntimeLibcallsInfo::getFunctionTy(LLVMContext &Ctx, const Triple &TT,
fcNegNormal));
return {FuncTy, Attrs};
}
+ case RTLIB::impl__ZGVnN2vl8l8_sincos:
+ case RTLIB::impl__ZGVnN4vl4l4_sincosf:
+ case RTLIB::impl__ZGVsNxvl8l8_sincos:
+ case RTLIB::impl__ZGVsNxvl4l4_sincosf:
+ case RTLIB::impl_armpl_vsincosq_f64:
+ case RTLIB::impl_armpl_vsincosq_f32:
+ case RTLIB::impl_armpl_svsincos_f64_x:
+ case RTLIB::impl_armpl_svsincos_f32_x:
+ case RTLIB::impl__ZGVnN4vl4l4_sincospif:
+ case RTLIB::impl__ZGVnN2vl8l8_sincospi:
+ case RTLIB::impl__ZGVsNxvl4l4_sincospif:
+ case RTLIB::impl__ZGVsNxvl8l8_sincospi:
+ case RTLIB::impl_armpl_vsincospiq_f32:
+ case RTLIB::impl_armpl_vsincospiq_f64:
+ case RTLIB::impl_armpl_svsincospi_f32_x:
+ case RTLIB::impl_armpl_svsincospi_f64_x: {
+ AttrBuilder FuncAttrBuilder(Ctx);
+
+ bool IsF32 = LibcallImpl == RTLIB::impl__ZGVnN4vl4l4_sincospif ||
+ LibcallImpl == RTLIB::impl__ZGVsNxvl4l4_sincospif ||
+ LibcallImpl == RTLIB::impl_armpl_vsincospiq_f32 ||
+ LibcallImpl == RTLIB::impl_armpl_svsincospi_f32_x ||
+ LibcallImpl == RTLIB::impl__ZGVnN4vl4l4_sincosf ||
+ LibcallImpl == RTLIB::impl__ZGVsNxvl4l4_sincosf ||
+ LibcallImpl == RTLIB::impl_armpl_vsincosq_f32 ||
+ LibcallImpl == RTLIB::impl_armpl_svsincos_f32_x;
+
+ Type *ScalarTy = IsF32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
+ unsigned EC = IsF32 ? 4 : 2;
+
+ bool IsScalable = LibcallImpl == RTLIB::impl__ZGVsNxvl8l8_sincos ||
+ LibcallImpl == RTLIB::impl__ZGVsNxvl4l4_sincosf ||
+ LibcallImpl == RTLIB::impl_armpl_svsincos_f32_x ||
+ LibcallImpl == RTLIB::impl_armpl_svsincos_f64_x ||
+ LibcallImpl == RTLIB::impl__ZGVsNxvl4l4_sincospif ||
+ LibcallImpl == RTLIB::impl__ZGVsNxvl8l8_sincospi ||
+ LibcallImpl == RTLIB::impl_armpl_svsincospi_f32_x ||
+ LibcallImpl == RTLIB::impl_armpl_svsincospi_f64_x;
+ VectorType *VecTy = VectorType::get(ScalarTy, EC, IsScalable);
+
+ for (Attribute::AttrKind Attr : CommonFnAttrs)
+ FuncAttrBuilder.addAttribute(Attr);
+ FuncAttrBuilder.addMemoryAttr(MemoryEffects::argMemOnly(ModRefInfo::Mod));
+
+ AttributeList Attrs;
+ Attrs = Attrs.addFnAttributes(Ctx, FuncAttrBuilder);
+
+ {
+ AttrBuilder ArgAttrBuilder(Ctx);
+ for (Attribute::AttrKind AK : CommonPtrArgAttrs)
+ ArgAttrBuilder.addAttribute(AK);
+ ArgAttrBuilder.addAlignmentAttr(DL.getABITypeAlign(VecTy));
+ Attrs = Attrs.addParamAttributes(Ctx, 1, ArgAttrBuilder);
+ Attrs = Attrs.addParamAttributes(Ctx, 2, ArgAttrBuilder);
+ }
+
+ PointerType *PtrTy = PointerType::get(Ctx, 0);
+ SmallVector<Type *, 4> ArgTys = {VecTy, PtrTy, PtrTy};
+ if (hasVectorMaskArgument(LibcallImpl))
+ ArgTys.push_back(VectorType::get(Type::getInt1Ty(Ctx), EC, IsScalable));
+
+ return {FunctionType::get(Type::getVoidTy(Ctx), ArgTys, false), Attrs};
+ }
default:
return {};
}
return {};
}
+
+bool RuntimeLibcallsInfo::hasVectorMaskArgument(RTLIB::LibcallImpl Impl) {
+  // FIXME: This should be generated by tablegen and support the argument at
+  // an arbitrary position.
+ switch (Impl) {
+ case RTLIB::impl_armpl_svsincos_f32_x:
+ case RTLIB::impl_armpl_svsincos_f64_x:
+ case RTLIB::impl_armpl_svsincospi_f32_x:
+ case RTLIB::impl_armpl_svsincospi_f64_x:
+ return true;
+ default:
+ return false;
+ }
+}
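Taken together, getFunctionTy gives these routines a void(vector, ptr, ptr[, mask]) shape, with the mask appended only for the SVE variants listed in hasVectorMaskArgument. Two signatures derived from the code above (a sketch, not authoritative declarations):

    // _ZGVnN4vl4l4_sincosf : void(<4 x float>, ptr, ptr)
    // armpl_svsincos_f32_x : void(<vscale x 4 x float>, ptr, ptr,
    //                             <vscale x 4 x i1>)  // trailing mask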
diff --git a/llvm/lib/IR/SystemLibraries.cpp b/llvm/lib/IR/SystemLibraries.cpp
new file mode 100644
index 0000000..fa4ac2a
--- /dev/null
+++ b/llvm/lib/IR/SystemLibraries.cpp
@@ -0,0 +1,34 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/SystemLibraries.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+VectorLibrary llvm::ClVectorLibrary;
+
+static cl::opt<VectorLibrary, true> ClVectorLibraryOpt(
+ "vector-library", cl::Hidden, cl::desc("Vector functions library"),
+ cl::location(llvm::ClVectorLibrary), cl::init(VectorLibrary::NoLibrary),
+ cl::values(
+ clEnumValN(VectorLibrary::NoLibrary, "none",
+ "No vector functions library"),
+ clEnumValN(VectorLibrary::Accelerate, "Accelerate",
+ "Accelerate framework"),
+ clEnumValN(VectorLibrary::DarwinLibSystemM, "Darwin_libsystem_m",
+ "Darwin libsystem_m"),
+ clEnumValN(VectorLibrary::LIBMVEC, "LIBMVEC",
+ "GLIBC Vector Math library"),
+ clEnumValN(VectorLibrary::MASSV, "MASSV", "IBM MASS vector library"),
+ clEnumValN(VectorLibrary::SVML, "SVML", "Intel SVML library"),
+ clEnumValN(VectorLibrary::SLEEFGNUABI, "sleefgnuabi",
+ "SIMD Library for Evaluating Elementary Functions"),
+ clEnumValN(VectorLibrary::ArmPL, "ArmPL", "Arm Performance Libraries"),
+ clEnumValN(VectorLibrary::AMDLIBM, "AMDLIBM",
+ "AMD vector math library")));
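Because the option uses cl::location, the parsed value lands in a plain global that any LLVMCore client can read without touching the command-line machinery. A sketch of a consumer:

    #include "llvm/IR/SystemLibraries.h"

    // e.g. inside lowering code; reads whatever -vector-library selected.
    if (llvm::ClVectorLibrary == llvm::VectorLibrary::ArmPL) {
      // enable ArmPL-specific vector libcall handling
    }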
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 59eb870..a4f6474 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -1559,11 +1559,27 @@ void Verifier::visitDISubprogram(const DISubprogram &N) {
auto *Node = dyn_cast<MDTuple>(RawNode);
CheckDI(Node, "invalid retained nodes list", &N, RawNode);
for (Metadata *Op : Node->operands()) {
- CheckDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op) ||
- isa<DIImportedEntity>(Op)),
+ CheckDI(Op, "nullptr in retained nodes", &N, Node);
+
+ auto True = [](const Metadata *) { return true; };
+ auto False = [](const Metadata *) { return false; };
+ bool IsTypeCorrect =
+ DISubprogram::visitRetainedNode<bool>(Op, True, True, True, False);
+ CheckDI(IsTypeCorrect,
"invalid retained nodes, expected DILocalVariable, DILabel or "
"DIImportedEntity",
&N, Node, Op);
+
+ auto *RetainedNode = cast<DINode>(Op);
+ auto *RetainedNodeScope = dyn_cast_or_null<DILocalScope>(
+ DISubprogram::getRawRetainedNodeScope(RetainedNode));
+ CheckDI(RetainedNodeScope,
+ "invalid retained nodes, retained node is not local", &N, Node,
+ RetainedNode);
+ CheckDI(
+ RetainedNodeScope->getSubprogram() == &N,
+ "invalid retained nodes, retained node does not belong to subprogram",
+ &N, Node, RetainedNode, RetainedNodeScope);
}
}
CheckDI(!hasConflictingReferenceFlags(N.getFlags()),
diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp
index 40ceb6f..0d190ea 100644
--- a/llvm/lib/Passes/PassBuilder.cpp
+++ b/llvm/lib/Passes/PassBuilder.cpp
@@ -45,7 +45,6 @@
#include "llvm/Analysis/IR2Vec.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/InlineAdvisor.h"
-#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
#include "llvm/Analysis/InstCount.h"
#include "llvm/Analysis/KernelInfo.h"
#include "llvm/Analysis/LastRunTrackingAnalysis.h"
@@ -900,6 +899,11 @@ Expected<bool> parseEntryExitInstrumenterPassOptions(StringRef Params) {
"EntryExitInstrumenter");
}
+Expected<bool> parseDropUnnecessaryAssumesPassOptions(StringRef Params) {
+ return PassBuilder::parseSinglePassOption(Params, "drop-deref",
+ "DropUnnecessaryAssumes");
+}
+
Expected<bool> parseLoopExtractorPassOptions(StringRef Params) {
return PassBuilder::parseSinglePassOption(Params, "single", "LoopExtractor");
}
diff --git a/llvm/lib/Passes/PassBuilderPipelines.cpp b/llvm/lib/Passes/PassBuilderPipelines.cpp
index 3f41618..2fe963b 100644
--- a/llvm/lib/Passes/PassBuilderPipelines.cpp
+++ b/llvm/lib/Passes/PassBuilderPipelines.cpp
@@ -1298,10 +1298,18 @@ PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level,
/// TODO: Should LTO cause any differences to this set of passes?
void PassBuilder::addVectorPasses(OptimizationLevel Level,
- FunctionPassManager &FPM, bool IsFullLTO) {
+ FunctionPassManager &FPM,
+ ThinOrFullLTOPhase LTOPhase) {
+ const bool IsFullLTO = LTOPhase == ThinOrFullLTOPhase::FullLTOPostLink;
+
FPM.addPass(LoopVectorizePass(
LoopVectorizeOptions(!PTO.LoopInterleaving, !PTO.LoopVectorization)));
+ // Drop dereferenceable assumes after vectorization, as they are no longer
+ // needed and can inhibit further optimization.
+ if (!isLTOPreLink(LTOPhase))
+ FPM.addPass(DropUnnecessaryAssumesPass(/*DropDereferenceable=*/true));
+
FPM.addPass(InferAlignmentPass());
if (IsFullLTO) {
// The vectorizer may have significantly shortened a loop body; unroll
@@ -1572,7 +1580,7 @@ PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
// from the TargetLibraryInfo.
OptimizePM.addPass(InjectTLIMappings());
- addVectorPasses(Level, OptimizePM, /* IsFullLTO */ false);
+ addVectorPasses(Level, OptimizePM, LTOPhase);
invokeVectorizerEndEPCallbacks(OptimizePM, Level);
@@ -2162,7 +2170,7 @@ PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
MainFPM.addPass(LoopDistributePass());
- addVectorPasses(Level, MainFPM, /* IsFullLTO */ true);
+ addVectorPasses(Level, MainFPM, ThinOrFullLTOPhase::FullLTOPostLink);
invokeVectorizerEndEPCallbacks(MainFPM, Level);
diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def
index d870f99..074c328 100644
--- a/llvm/lib/Passes/PassRegistry.def
+++ b/llvm/lib/Passes/PassRegistry.def
@@ -359,7 +359,6 @@ FUNCTION_ANALYSIS("ephemerals", EphemeralValuesAnalysis())
FUNCTION_ANALYSIS("func-properties", FunctionPropertiesAnalysis())
FUNCTION_ANALYSIS("machine-function-info", MachineFunctionAnalysis(*TM))
FUNCTION_ANALYSIS("gc-function", GCFunctionAnalysis())
-FUNCTION_ANALYSIS("inliner-size-estimator", InlineSizeEstimatorAnalysis())
FUNCTION_ANALYSIS("last-run-tracking", LastRunTrackingAnalysis())
FUNCTION_ANALYSIS("lazy-value-info", LazyValueAnalysis())
FUNCTION_ANALYSIS("loops", LoopAnalysis())
@@ -432,7 +431,6 @@ FUNCTION_PASS("dot-post-dom", PostDomPrinter())
FUNCTION_PASS("dot-post-dom-only", PostDomOnlyPrinter())
FUNCTION_PASS("dse", DSEPass())
FUNCTION_PASS("dwarf-eh-prepare", DwarfEHPreparePass(*TM))
-FUNCTION_PASS("drop-unnecessary-assumes", DropUnnecessaryAssumesPass())
FUNCTION_PASS("expand-large-div-rem", ExpandLargeDivRemPass(*TM))
FUNCTION_PASS("expand-memcmp", ExpandMemCmpPass(*TM))
FUNCTION_PASS("expand-reductions", ExpandReductionsPass())
@@ -516,8 +514,6 @@ FUNCTION_PASS("print<domfrontier>", DominanceFrontierPrinterPass(errs()))
FUNCTION_PASS("print<domtree>", DominatorTreePrinterPass(errs()))
FUNCTION_PASS("print<func-properties>", FunctionPropertiesPrinterPass(errs()))
FUNCTION_PASS("print<inline-cost>", InlineCostAnnotationPrinterPass(errs()))
-FUNCTION_PASS("print<inliner-size-estimator>",
- InlineSizeEstimatorAnalysisPrinterPass(errs()))
FUNCTION_PASS("print<lazy-value-info>", LazyValueInfoPrinterPass(errs()))
FUNCTION_PASS("print<loops>", LoopPrinterPass(errs()))
FUNCTION_PASS("print<memoryssa-walker>", MemorySSAWalkerPrinterPass(errs()))
@@ -585,6 +581,10 @@ FUNCTION_PASS_WITH_PARAMS(
[](bool UseMemorySSA) { return EarlyCSEPass(UseMemorySSA); },
parseEarlyCSEPassOptions, "memssa")
FUNCTION_PASS_WITH_PARAMS(
+ "drop-unnecessary-assumes", "DropUnnecessaryAssumesPass",
+    [](bool DropDereferenceable) {
+      return DropUnnecessaryAssumesPass(DropDereferenceable);
+    },
+ parseDropUnnecessaryAssumesPassOptions, "drop-deref")
+FUNCTION_PASS_WITH_PARAMS(
"ee-instrument", "EntryExitInstrumenterPass",
[](bool PostInlining) { return EntryExitInstrumenterPass(PostInlining); },
parseEntryExitInstrumenterPassOptions, "post-inline")
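The pass is now registered with a parameter rather than twice. Assuming the usual opt pipeline syntax for single-option passes, it can be invoked as:

    opt -passes='drop-unnecessary-assumes' ...             # DropDereferenceable=false
    opt -passes='drop-unnecessary-assumes<drop-deref>' ... # DropDereferenceable=true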
diff --git a/llvm/lib/Support/Mustache.cpp b/llvm/lib/Support/Mustache.cpp
index 6c140be..8b95049 100644
--- a/llvm/lib/Support/Mustache.cpp
+++ b/llvm/lib/Support/Mustache.cpp
@@ -10,7 +10,6 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cctype>
-#include <optional>
#include <sstream>
#define DEBUG_TYPE "mustache"
diff --git a/llvm/lib/Support/SpecialCaseList.cpp b/llvm/lib/Support/SpecialCaseList.cpp
index 246d90c..beec8b8 100644
--- a/llvm/lib/Support/SpecialCaseList.cpp
+++ b/llvm/lib/Support/SpecialCaseList.cpp
@@ -15,11 +15,13 @@
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LineIterator.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <limits>
#include <memory>
@@ -63,12 +65,12 @@ void SpecialCaseList::RegexMatcher::preprocess(bool BySize) {
}
}
-void SpecialCaseList::RegexMatcher::match(
- StringRef Query,
- llvm::function_ref<void(StringRef Rule, unsigned LineNo)> Cb) const {
+SpecialCaseList::Match
+SpecialCaseList::RegexMatcher::match(StringRef Query) const {
for (const auto &R : reverse(RegExes))
if (R.Rg.match(Query))
- return Cb(R.Name, R.LineNo);
+ return {R.Name, R.LineNo};
+ return NotMatched;
}
Error SpecialCaseList::GlobMatcher::insert(StringRef Pattern,
@@ -90,7 +92,7 @@ void SpecialCaseList::GlobMatcher::preprocess(bool BySize) {
});
}
- for (const auto &G : reverse(Globs)) {
+ for (const auto &[Idx, G] : enumerate(Globs)) {
StringRef Prefix = G.Pattern.prefix();
StringRef Suffix = G.Pattern.suffix();
@@ -102,26 +104,29 @@ void SpecialCaseList::GlobMatcher::preprocess(bool BySize) {
// But only if substring is not empty. Searching this tree is more
// expensive.
auto &V = SubstrToGlob.emplace(Substr).first->second;
- V.emplace_back(&G);
+ V.emplace_back(Idx);
continue;
}
}
auto &SToGlob = PrefixSuffixToGlob.emplace(Prefix).first->second;
auto &V = SToGlob.emplace(reverse(Suffix)).first->second;
- V.emplace_back(&G);
+ V.emplace_back(Idx);
}
}
-void SpecialCaseList::GlobMatcher::match(
- StringRef Query,
- llvm::function_ref<void(StringRef Rule, unsigned LineNo)> Cb) const {
+SpecialCaseList::Match
+SpecialCaseList::GlobMatcher::match(StringRef Query) const {
+ int Best = -1;
if (!PrefixSuffixToGlob.empty()) {
for (const auto &[_, SToGlob] : PrefixSuffixToGlob.find_prefixes(Query)) {
for (const auto &[_, V] : SToGlob.find_prefixes(reverse(Query))) {
- for (const auto *G : V) {
- if (G->Pattern.match(Query)) {
- Cb(G->Name, G->LineNo);
+ for (int Idx : reverse(V)) {
+ if (Best > Idx)
+ break;
+ const GlobMatcher::Glob &G = Globs[Idx];
+ if (G.Pattern.match(Query)) {
+ Best = Idx;
// As soon as we find a match in the vector, we can break for this
// vector, since the globs are already sorted by priority within the
// prefix group. However, we continue searching other prefix groups
@@ -138,9 +143,12 @@ void SpecialCaseList::GlobMatcher::match(
// possibilities. In most cases search will fail on first characters.
for (StringRef Q = Query; !Q.empty(); Q = Q.drop_front()) {
for (const auto &[_, V] : SubstrToGlob.find_prefixes(Q)) {
- for (const auto *G : V) {
- if (G->Pattern.match(Query)) {
- Cb(G->Name, G->LineNo);
+ for (int Idx : reverse(V)) {
+ if (Best > Idx)
+ break;
+ const GlobMatcher::Glob &G = Globs[Idx];
+ if (G.Pattern.match(Query)) {
+ Best = Idx;
// As soon as we find a match in the vector, we can break for this
// vector, since the globs are already sorted by priority within the
// prefix group. However, we continue searching other prefix groups
@@ -151,6 +159,9 @@ void SpecialCaseList::GlobMatcher::match(
}
}
}
+ if (Best < 0)
+ return NotMatched;
+ return {Globs[Best].Name, Globs[Best].LineNo};
}
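Returning an index instead of invoking a callback lets the matcher encode "last rule wins" directly: Globs is in declaration order, each bucket's vector is ascending, and iterating a bucket in reverse means the first hit is that bucket's best. The Best > Idx check then prunes buckets that cannot beat a match already found. A condensed sketch of the invariant:

    // Bucket = candidate indices into Globs, ascending by declaration order.
    for (int Idx : reverse(Bucket)) {
      if (Best > Idx)
        break;              // a later-declared rule already matched elsewhere
      if (Globs[Idx].Pattern.match(Query)) {
        Best = Idx;         // best within this bucket; keep probing others
        break;
      }
    }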
SpecialCaseList::Matcher::Matcher(bool UseGlobs, bool RemoveDotSlash)
@@ -169,12 +180,11 @@ void SpecialCaseList::Matcher::preprocess(bool BySize) {
return std::visit([&](auto &V) { return V.preprocess(BySize); }, M);
}
-void SpecialCaseList::Matcher::match(
- StringRef Query,
- llvm::function_ref<void(StringRef Rule, unsigned LineNo)> Cb) const {
+SpecialCaseList::Match SpecialCaseList::Matcher::match(StringRef Query) const {
if (RemoveDotSlash)
Query = llvm::sys::path::remove_leading_dotslash(Query);
- return std::visit([&](auto &V) { return V.match(Query, Cb); }, M);
+ return std::visit(
+ [&](auto &V) -> SpecialCaseList::Match { return V.match(Query); }, M);
}
// TODO: Refactor this to return Expected<...>
@@ -264,11 +274,12 @@ bool SpecialCaseList::parse(unsigned FileIdx, const MemoryBuffer *MB,
bool RemoveDotSlash = Version > 2;
- Section *CurrentSection;
- if (auto Err = addSection("*", FileIdx, 1, true).moveInto(CurrentSection)) {
+ auto ErrOrSection = addSection("*", FileIdx, 1, true);
+ if (auto Err = ErrOrSection.takeError()) {
Error = toString(std::move(Err));
return false;
}
+ Section *CurrentSection = ErrOrSection.get();
// This is the current list of prefixes for all existing users matching file
// path. We may need parametrization in constructor in future.
@@ -290,12 +301,13 @@ bool SpecialCaseList::parse(unsigned FileIdx, const MemoryBuffer *MB,
return false;
}
- if (auto Err = addSection(Line.drop_front().drop_back(), FileIdx, LineNo,
- UseGlobs)
- .moveInto(CurrentSection)) {
+ auto ErrOrSection =
+ addSection(Line.drop_front().drop_back(), FileIdx, LineNo, UseGlobs);
+ if (auto Err = ErrOrSection.takeError()) {
Error = toString(std::move(Err));
return false;
}
+ CurrentSection = ErrOrSection.get();
continue;
}
@@ -348,6 +360,10 @@ SpecialCaseList::inSectionBlame(StringRef Section, StringRef Prefix,
return NotFound;
}
+bool SpecialCaseList::Section::matchName(StringRef Name) const {
+ return SectionMatcher.matchAny(Name);
+}
+
const SpecialCaseList::Matcher *
SpecialCaseList::Section::findMatcher(StringRef Prefix,
StringRef Category) const {
@@ -371,26 +387,21 @@ LLVM_ABI void SpecialCaseList::Section::preprocess(bool OrderBySize) {
unsigned SpecialCaseList::Section::getLastMatch(StringRef Prefix,
StringRef Query,
StringRef Category) const {
- unsigned LastLine = 0;
- if (const Matcher *M = findMatcher(Prefix, Category)) {
- M->match(Query, [&](StringRef, unsigned LineNo) {
- LastLine = std::max(LastLine, LineNo);
- });
- }
- return LastLine;
+ if (const Matcher *M = findMatcher(Prefix, Category))
+ return M->match(Query).second;
+ return 0;
}
StringRef SpecialCaseList::Section::getLongestMatch(StringRef Prefix,
StringRef Query,
StringRef Category) const {
- StringRef LongestRule;
- if (const Matcher *M = findMatcher(Prefix, Category)) {
- M->match(Query, [&](StringRef Rule, unsigned) {
- if (LongestRule.size() < Rule.size())
- LongestRule = Rule;
- });
- }
- return LongestRule;
+ if (const Matcher *M = findMatcher(Prefix, Category))
+ return M->match(Query).first;
+ return {};
+}
+
+bool SpecialCaseList::Section::hasPrefix(StringRef Prefix) const {
+ return Entries.find(Prefix) != Entries.end();
}
} // namespace llvm
diff --git a/llvm/lib/Support/Unix/Unix.h b/llvm/lib/Support/Unix/Unix.h
index a1d44c6..f24d524 100644
--- a/llvm/lib/Support/Unix/Unix.h
+++ b/llvm/lib/Support/Unix/Unix.h
@@ -22,7 +22,6 @@
#include "llvm/Support/Chrono.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/ErrorHandling.h"
-#include <algorithm>
#include <assert.h>
#include <cerrno>
#include <cstdio>
diff --git a/llvm/lib/Support/Windows/Program.inc b/llvm/lib/Support/Windows/Program.inc
index ec785e4..5dcd2c9 100644
--- a/llvm/lib/Support/Windows/Program.inc
+++ b/llvm/lib/Support/Windows/Program.inc
@@ -23,7 +23,6 @@
#include <fcntl.h>
#include <io.h>
#include <malloc.h>
-#include <numeric>
#include <psapi.h>
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Support/Windows/Signals.inc b/llvm/lib/Support/Windows/Signals.inc
index da68994..bacbb76 100644
--- a/llvm/lib/Support/Windows/Signals.inc
+++ b/llvm/lib/Support/Windows/Signals.inc
@@ -16,7 +16,6 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/WindowsError.h"
-#include <algorithm>
#include <io.h>
#include <signal.h>
#include <stdio.h>
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 76a790dc..8457f61 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -22308,6 +22308,37 @@ static SDValue performExtBinopLoadFold(SDNode *N, SelectionDAG &DAG) {
return DAG.getNode(N->getOpcode(), DL, VT, Ext0, NShift);
}
+// Attempt to combine the following patterns:
+// SUB x, (CSET LO, (CMP a, b)) -> SBC x, 0, (CMP a, b)
+// SUB (SUB x, y), (CSET LO, (CMP a, b)) -> SBC x, y, (CMP a, b)
+// The CSET may be preceded by a ZEXT.
+static SDValue performSubWithBorrowCombine(SDNode *N, SelectionDAG &DAG) {
+ if (N->getOpcode() != ISD::SUB)
+ return SDValue();
+
+ EVT VT = N->getValueType(0);
+ if (VT != MVT::i32 && VT != MVT::i64)
+ return SDValue();
+
+ SDValue N1 = N->getOperand(1);
+ if (N1.getOpcode() == ISD::ZERO_EXTEND && N1.hasOneUse())
+ N1 = N1.getOperand(0);
+ if (!N1.hasOneUse() || getCSETCondCode(N1) != AArch64CC::LO)
+ return SDValue();
+
+ SDValue Flags = N1.getOperand(3);
+ if (Flags.getOpcode() != AArch64ISD::SUBS)
+ return SDValue();
+
+ SDLoc DL(N);
+ SDValue N0 = N->getOperand(0);
+ if (N0->getOpcode() == ISD::SUB)
+ return DAG.getNode(AArch64ISD::SBC, DL, VT, N0.getOperand(0),
+ N0.getOperand(1), Flags);
+ return DAG.getNode(AArch64ISD::SBC, DL, VT, N0, DAG.getConstant(0, DL, VT),
+ Flags);
+}
+
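The combine targets the borrow produced by an unsigned compare. In C++ terms, the shape it recognizes is roughly the following (exact codegen depends on the surrounding DAG):

    #include <cstdint>

    // x - y - (a < b)  -->  SUBS a, b ; SBC x, y   on AArch64
    uint64_t sub_with_borrow(uint64_t x, uint64_t y, uint64_t a, uint64_t b) {
      return (x - y) - (a < b); // (a < b) becomes CSET LO after CMP a, b
    }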
static SDValue performAddSubCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
// Try to change sum of two reductions.
@@ -22329,6 +22360,8 @@ static SDValue performAddSubCombine(SDNode *N,
return Val;
if (SDValue Val = performAddSubIntoVectorOp(N, DCI.DAG))
return Val;
+ if (SDValue Val = performSubWithBorrowCombine(N, DCI.DAG))
+ return Val;
if (SDValue Val = performExtBinopLoadFold(N, DCI.DAG))
return Val;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 4b40733..66e4949 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -91,7 +91,7 @@ static cl::opt<unsigned> GatherOptSearchLimit(
"machine-combiner gather pattern optimization"));
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
- : AArch64GenInstrInfo(STI, AArch64::ADJCALLSTACKDOWN,
+ : AArch64GenInstrInfo(STI, RI, AArch64::ADJCALLSTACKDOWN,
AArch64::ADJCALLSTACKUP, AArch64::CATCHRET),
RI(STI.getTargetTriple(), STI.getHwMode()), Subtarget(STI) {}
@@ -1780,6 +1780,16 @@ static unsigned sForm(MachineInstr &Instr) {
case AArch64::SUBSWri:
case AArch64::SUBSXrr:
case AArch64::SUBSXri:
+ case AArch64::ANDSWri:
+ case AArch64::ANDSWrr:
+ case AArch64::ANDSWrs:
+ case AArch64::ANDSXri:
+ case AArch64::ANDSXrr:
+ case AArch64::ANDSXrs:
+ case AArch64::BICSWrr:
+ case AArch64::BICSXrr:
+ case AArch64::BICSWrs:
+ case AArch64::BICSXrs:
return Instr.getOpcode();
case AArch64::ADDWrr:
@@ -1810,6 +1820,22 @@ static unsigned sForm(MachineInstr &Instr) {
return AArch64::ANDSWri;
case AArch64::ANDXri:
return AArch64::ANDSXri;
+ case AArch64::ANDWrr:
+ return AArch64::ANDSWrr;
+ case AArch64::ANDWrs:
+ return AArch64::ANDSWrs;
+ case AArch64::ANDXrr:
+ return AArch64::ANDSXrr;
+ case AArch64::ANDXrs:
+ return AArch64::ANDSXrs;
+ case AArch64::BICWrr:
+ return AArch64::BICSWrr;
+ case AArch64::BICXrr:
+ return AArch64::BICSXrr;
+ case AArch64::BICWrs:
+ return AArch64::BICSWrs;
+ case AArch64::BICXrs:
+ return AArch64::BICSXrs;
}
}
@@ -1947,6 +1973,25 @@ static bool isSUBSRegImm(unsigned Opcode) {
return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
}
+static bool isANDOpcode(MachineInstr &MI) {
+ unsigned Opc = sForm(MI);
+ switch (Opc) {
+ case AArch64::ANDSWri:
+ case AArch64::ANDSWrr:
+ case AArch64::ANDSWrs:
+ case AArch64::ANDSXri:
+ case AArch64::ANDSXrr:
+ case AArch64::ANDSXrs:
+ case AArch64::BICSWrr:
+ case AArch64::BICSXrr:
+ case AArch64::BICSWrs:
+ case AArch64::BICSXrs:
+ return true;
+ default:
+ return false;
+ }
+}
+
/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
@@ -1984,7 +2029,8 @@ static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
// 1) MI and CmpInstr set N and V to the same value.
// 2) If MI is add/sub with no-signed-wrap, it produces a poison value when
// signed overflow occurs, so CmpInstr could still be simplified away.
- if (NZVCUsed->V && !MI.getFlag(MachineInstr::NoSWrap))
+ // Note that Ands and Bics instructions always clear the V flag.
+ if (NZVCUsed->V && !MI.getFlag(MachineInstr::NoSWrap) && !isANDOpcode(MI))
return false;
AccessKind AccessToCheck = AK_Write;
@@ -9590,6 +9636,27 @@ AArch64InstrInfo::getOutliningCandidateInfo(
unsigned NumBytesToCreateFrame = 0;
+  // Avoid splitting an ADRP/ADD or ADRP/LDR pair into outlined functions.
+  // These instructions are fused together by the scheduler.
+  // Any candidate where ADRP is the last instruction should be rejected,
+  // as outlining it would split the fused pair.
+ MachineInstr &LastMI = RepeatedSequenceLocs[0].back();
+ MachineInstr &FirstMI = RepeatedSequenceLocs[0].front();
+ if (LastMI.getOpcode() == AArch64::ADRP &&
+ (LastMI.getOperand(1).getTargetFlags() & AArch64II::MO_PAGE) != 0 &&
+ (LastMI.getOperand(1).getTargetFlags() & AArch64II::MO_GOT) != 0) {
+ return std::nullopt;
+ }
+
+  // Similarly, any candidate whose first instruction is an ADD/LDR with a
+  // page offset should be rejected, to avoid splitting an ADRP pair.
+ if ((FirstMI.getOpcode() == AArch64::ADDXri ||
+ FirstMI.getOpcode() == AArch64::LDRXui) &&
+ (FirstMI.getOperand(2).getTargetFlags() & AArch64II::MO_PAGEOFF) != 0 &&
+ (FirstMI.getOperand(2).getTargetFlags() & AArch64II::MO_GOT) != 0) {
+ return std::nullopt;
+ }
+
// We only allow outlining for functions having exactly matching return
// address signing attributes, i.e., all share the same value for the
// attribute "sign-return-address" and all share the same type of key they
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 76f076a..b30e3d0 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4444,6 +4444,11 @@ defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
[(AArch64Prefetch timm:$Rt,
(am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
+// PRFM falls back to PRFUM for negative or unaligned offsets (not a multiple
+// of 8).
+def : InstAlias<"prfm $Rt, [$Rn, $offset]",
+ (PRFUMi prfop:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
+
//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index a5048b9..eaf8723 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -1123,24 +1123,85 @@ unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
}
}
-// FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register allocation
-// where a consecutive multi-vector tuple is constructed from the same indices
-// of multiple strided loads. This may still result in unnecessary copies
-// between the loads and the tuple. Here we try to return a hint to assign the
-// contiguous ZPRMulReg starting at the same register as the first operand of
-// the pseudo, which should be a subregister of the first strided load.
+// We add regalloc hints for different cases:
+// * Choosing a better destination operand for predicated SVE instructions
+// where the inactive lanes are undef, by preferring a register that is
+// already used by one of the other operands of the instruction.
//
-// For example, if the first strided load has been assigned $z16_z20_z24_z28
-// and the operands of the pseudo are each accessing subregister zsub2, we
-// should look through through Order to find a contiguous register which
-// begins with $z24 (i.e. $z24_z25_z26_z27).
+// * Improve register allocation for SME multi-vector instructions where we can
+// benefit from the strided- and contiguous register multi-vector tuples.
//
+// Here FORM_TRANSPOSED_REG_TUPLE nodes are created to improve register
+// allocation where a consecutive multi-vector tuple is constructed from the
+// same indices of multiple strided loads. This may still result in
+// unnecessary copies between the loads and the tuple. Here we try to return a
+// hint to assign the contiguous ZPRMulReg starting at the same register as
+// the first operand of the pseudo, which should be a subregister of the first
+// strided load.
+//
+// For example, if the first strided load has been assigned $z16_z20_z24_z28
+// and the operands of the pseudo are each accessing subregister zsub2, we
+// should look through Order to find a contiguous register which
+// begins with $z24 (i.e. $z24_z25_z26_z27).
bool AArch64RegisterInfo::getRegAllocationHints(
Register VirtReg, ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
-
auto &ST = MF.getSubtarget<AArch64Subtarget>();
+ const AArch64InstrInfo *TII =
+ MF.getSubtarget<AArch64Subtarget>().getInstrInfo();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ // For predicated SVE instructions where the inactive lanes are undef,
+ // pick a destination register that is already used by another operand,
+ // which avoids introducing a movprfx.
+ const TargetRegisterClass *RegRC = MRI.getRegClass(VirtReg);
+ if (AArch64::ZPRRegClass.hasSubClassEq(RegRC)) {
+ for (const MachineOperand &DefOp : MRI.def_operands(VirtReg)) {
+ const MachineInstr &Def = *DefOp.getParent();
+ if (DefOp.isImplicit() ||
+ (TII->get(Def.getOpcode()).TSFlags & AArch64::FalseLanesMask) !=
+ AArch64::FalseLanesUndef)
+ continue;
+
+ unsigned InstFlags =
+ TII->get(AArch64::getSVEPseudoMap(Def.getOpcode())).TSFlags;
+
+ for (MCPhysReg R : Order) {
+ auto AddHintIfSuitable = [&](MCPhysReg R, const MachineOperand &MO) {
+ // R is a suitable register hint if the source operand has not yet been
+ // allocated a register, or if it has already been assigned R.
+ if (!VRM->hasPhys(MO.getReg()) || VRM->getPhys(MO.getReg()) == R)
+ Hints.push_back(R);
+ };
+
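+ // Operand 0 is the destination and operand 1 the governing predicate;
+ // the source operands of these destructive pseudos start at index 2.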
+ switch (InstFlags & AArch64::DestructiveInstTypeMask) {
+ default:
+ break;
+ case AArch64::DestructiveTernaryCommWithRev:
+ AddHintIfSuitable(R, Def.getOperand(2));
+ AddHintIfSuitable(R, Def.getOperand(3));
+ AddHintIfSuitable(R, Def.getOperand(4));
+ break;
+ case AArch64::DestructiveBinaryComm:
+ case AArch64::DestructiveBinaryCommWithRev:
+ AddHintIfSuitable(R, Def.getOperand(2));
+ AddHintIfSuitable(R, Def.getOperand(3));
+ break;
+ case AArch64::DestructiveBinary:
+ case AArch64::DestructiveBinaryImm:
+ AddHintIfSuitable(R, Def.getOperand(2));
+ break;
+ }
+ }
+ }
+
+ if (Hints.size())
+ return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints,
+ MF, VRM);
+ }
+
if (!ST.hasSME() || !ST.isStreaming())
return TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF,
VRM);
@@ -1153,8 +1214,7 @@ bool AArch64RegisterInfo::getRegAllocationHints(
// FORM_TRANSPOSED_REG_TUPLE pseudo, we want to favour reducing copy
// instructions over reducing the number of clobbered callee-save registers,
// so we add the strided registers as a hint.
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- unsigned RegID = MRI.getRegClass(VirtReg)->getID();
+ unsigned RegID = RegRC->getID();
if (RegID == AArch64::ZPR2StridedOrContiguousRegClassID ||
RegID == AArch64::ZPR4StridedOrContiguousRegClassID) {
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index e1f4386..65b6077 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3597,6 +3597,18 @@ let Predicates = [HasSVE_or_SME] in {
def : Pat<(sext (i32 (vector_extract nxv4i32:$vec, VectorIndexS:$index))),
(SMOVvi32to64 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index)>;
+
+ // Extracts of ``unsigned'' i8 or i16 elements lead to the zero-extend being
+ // transformed to an AND mask. The mask is redundant since UMOV already zeroes
+ // the high bits of the destination register.
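+ // For example, ``umov w0, v0.b[1]'' already writes zeros to bits [31:8]
+ // of w0, so a following ``and w0, w0, #0xff'' would be a no-op.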
+ def : Pat<(i32 (and (vector_extract nxv16i8:$vec, VectorIndexB:$index), 0xff)),
+ (UMOVvi8 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index)>;
+ def : Pat<(i32 (and (vector_extract nxv8i16:$vec, VectorIndexH:$index), 0xffff)),
+ (UMOVvi16 (v8i16 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexH:$index)>;
+ def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract nxv16i8:$vec, VectorIndexB:$index)))), (i64 0xff))),
+ (SUBREG_TO_REG (i64 0), (i32 (UMOVvi8 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index)), sub_32)>;
+ def : Pat<(i64 (and (i64 (anyext (i32 (vector_extract nxv8i16:$vec, VectorIndexH:$index)))), (i64 0xffff))),
+ (SUBREG_TO_REG (i64 0), (i32 (UMOVvi16 (v8i16 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexH:$index)), sub_32)>;
} // End HasNEON
// Extract first element from vector.
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index 8974965..ab4004e 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -157,7 +157,7 @@ public:
bool enableMachineScheduler() const override { return true; }
bool enablePostRAScheduler() const override { return usePostRAScheduler(); }
bool enableSubRegLiveness() const override { return EnableSubregLiveness; }
-
+ bool enableTerminalRule() const override { return true; }
bool enableMachinePipeliner() const override;
bool useDFAforSMS() const override { return false; }
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index 068954f..0bf2b31 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -54,7 +54,6 @@
#include "llvm/Transforms/Vectorize/LoopIdiomVectorize.h"
#include <memory>
#include <optional>
-#include <string>
using namespace llvm;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 54ba2f8..dbca5af 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -5081,17 +5081,17 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
unsigned MinNumRegsRequired = DstSize / 32;
const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+ bool UseAGPRForm = Info->selectAGPRFormMFMA(MinNumRegsRequired);
+
OpdsMapping[0] =
- Info->getMinNumAGPRs() >= MinNumRegsRequired
- ? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI)
- : getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
+ UseAGPRForm ? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI)
+ : getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
OpdsMapping[4] =
- Info->getMinNumAGPRs() >= MinNumRegsRequired
- ? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI)
- : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
+ UseAGPRForm ? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI)
+ : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
OpdsMapping[8] = getVGPROpMapping(MI.getOperand(8).getReg(), MRI, *TRI);
OpdsMapping[10] = getVGPROpMapping(MI.getOperand(10).getReg(), MRI, *TRI);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp b/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
index 0ea9add..b03d50f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUResourceUsageAnalysis.cpp
@@ -261,13 +261,6 @@ AMDGPUResourceUsageAnalysisImpl::analyzeResourceUsage(
const Function *Callee = getCalleeFunction(*CalleeOp);
- // Avoid crashing on undefined behavior with an illegal call to a
- // kernel. If a callsite's calling convention doesn't match the
- // function's, it's undefined behavior. If the callsite calling
- // convention does match, that would have errored earlier.
- if (Callee && AMDGPU::isEntryFunctionCC(Callee->getCallingConv()))
- report_fatal_error("invalid call to entry function");
-
auto isSameFunction = [](const MachineFunction &MF, const Function *F) {
return F == &MF.getFunction();
};
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index f377b8a..da4bd87 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1040,6 +1040,8 @@ public:
return true;
}
+ bool enableTerminalRule() const override { return true; }
+
bool useAA() const override;
bool enableSubRegLiveness() const override {
diff --git a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
index 3e256cc..0104085 100644
--- a/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/R600InstrInfo.cpp
@@ -29,7 +29,7 @@ using namespace llvm;
#include "R600GenInstrInfo.inc"
R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
- : R600GenInstrInfo(ST, -1, -1), RI(), ST(ST) {}
+ : R600GenInstrInfo(ST, RI, -1, -1), RI(), ST(ST) {}
bool R600InstrInfo::isVector(const MachineInstr &MI) const {
return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
diff --git a/llvm/lib/Target/AMDGPU/R600Subtarget.h b/llvm/lib/Target/AMDGPU/R600Subtarget.h
index 22e56b6..efd99db 100644
--- a/llvm/lib/Target/AMDGPU/R600Subtarget.h
+++ b/llvm/lib/Target/AMDGPU/R600Subtarget.h
@@ -126,6 +126,8 @@ public:
return true;
}
+ bool enableTerminalRule() const override { return true; }
+
bool enableSubRegLiveness() const override {
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 9c74c65..6b397e0 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -63,7 +63,8 @@ static cl::opt<bool> Fix16BitCopies(
cl::ReallyHidden);
SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
- : AMDGPUGenInstrInfo(ST, AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
+ : AMDGPUGenInstrInfo(ST, RI, AMDGPU::ADJCALLSTACKUP,
+ AMDGPU::ADJCALLSTACKDOWN),
RI(ST), ST(ST) {
SchedModel.init(&ST);
}
diff --git a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index f0d1117..00aae2c9 100644
--- a/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -233,10 +233,11 @@ private:
void copyToDestRegs(CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore,
- AMDGPU::OpName OpName, Register DestReg) const;
+ const DebugLoc &DL, AMDGPU::OpName OpName,
+ Register DestReg) const;
Register copyFromSrcRegs(CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore,
- AMDGPU::OpName OpName) const;
+ const DebugLoc &DL, AMDGPU::OpName OpName) const;
unsigned read2Opcode(unsigned EltSize) const;
unsigned read2ST64Opcode(unsigned EltSize) const;
@@ -1367,10 +1368,9 @@ SILoadStoreOptimizer::checkAndPrepareMerge(CombineInfo &CI,
// Paired.
void SILoadStoreOptimizer::copyToDestRegs(
CombineInfo &CI, CombineInfo &Paired,
- MachineBasicBlock::iterator InsertBefore, AMDGPU::OpName OpName,
- Register DestReg) const {
+ MachineBasicBlock::iterator InsertBefore, const DebugLoc &DL,
+ AMDGPU::OpName OpName, Register DestReg) const {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
auto [SubRegIdx0, SubRegIdx1] = getSubRegIdxs(CI, Paired);
@@ -1398,9 +1398,9 @@ void SILoadStoreOptimizer::copyToDestRegs(
Register
SILoadStoreOptimizer::copyFromSrcRegs(CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore,
+ const DebugLoc &DL,
AMDGPU::OpName OpName) const {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
auto [SubRegIdx0, SubRegIdx1] = getSubRegIdxs(CI, Paired);
@@ -1456,7 +1456,8 @@ SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired,
const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
Register DestReg = MRI->createVirtualRegister(SuperRC);
- DebugLoc DL = CI.I->getDebugLoc();
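+ // Merge the locations of the two instructions being combined so that the
+ // merged load is not attributed to only one of the source lines.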
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
Register BaseReg = AddrReg->getReg();
unsigned BaseSubReg = AddrReg->getSubReg();
@@ -1484,7 +1485,7 @@ SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired,
.addImm(0) // gds
.cloneMergedMemRefs({&*CI.I, &*Paired.I});
- copyToDestRegs(CI, Paired, InsertBefore, AMDGPU::OpName::vdst, DestReg);
+ copyToDestRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::vdst, DestReg);
CI.I->eraseFromParent();
Paired.I->eraseFromParent();
@@ -1541,7 +1542,8 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
(NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
const MCInstrDesc &Write2Desc = TII->get(Opc);
- DebugLoc DL = CI.I->getDebugLoc();
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
Register BaseReg = AddrReg->getReg();
unsigned BaseSubReg = AddrReg->getSubReg();
@@ -1582,7 +1584,9 @@ MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore) {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
+
const unsigned Opcode = getNewOpcode(CI, Paired);
const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
@@ -1607,7 +1611,7 @@ SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI, CombineInfo &Paired,
MachineInstr *New = MIB.addMemOperand(combineKnownAdjacentMMOs(CI, Paired));
- copyToDestRegs(CI, Paired, InsertBefore, AMDGPU::OpName::vdata, DestReg);
+ copyToDestRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::vdata, DestReg);
CI.I->eraseFromParent();
Paired.I->eraseFromParent();
@@ -1618,7 +1622,9 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSMemLoadImmPair(
CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore) {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
+
const unsigned Opcode = getNewOpcode(CI, Paired);
const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
@@ -1639,7 +1645,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeSMemLoadImmPair(
New.addImm(MergedOffset);
New.addImm(CI.CPol).addMemOperand(combineKnownAdjacentMMOs(CI, Paired));
- copyToDestRegs(CI, Paired, InsertBefore, AMDGPU::OpName::sdst, DestReg);
+ copyToDestRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::sdst, DestReg);
CI.I->eraseFromParent();
Paired.I->eraseFromParent();
@@ -1650,7 +1656,9 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore) {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
+
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
const unsigned Opcode = getNewOpcode(CI, Paired);
@@ -1680,7 +1688,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferLoadPair(
.addImm(0) // swz
.addMemOperand(combineKnownAdjacentMMOs(CI, Paired));
- copyToDestRegs(CI, Paired, InsertBefore, AMDGPU::OpName::vdata, DestReg);
+ copyToDestRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::vdata, DestReg);
CI.I->eraseFromParent();
Paired.I->eraseFromParent();
@@ -1691,7 +1699,9 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferLoadPair(
CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore) {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
+
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
const unsigned Opcode = getNewOpcode(CI, Paired);
@@ -1731,7 +1741,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferLoadPair(
.addImm(0) // swz
.addMemOperand(combineKnownAdjacentMMOs(CI, Paired));
- copyToDestRegs(CI, Paired, InsertBefore, AMDGPU::OpName::vdata, DestReg);
+ copyToDestRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::vdata, DestReg);
CI.I->eraseFromParent();
Paired.I->eraseFromParent();
@@ -1742,12 +1752,13 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeTBufferStorePair(
CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore) {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
const unsigned Opcode = getNewOpcode(CI, Paired);
Register SrcReg =
- copyFromSrcRegs(CI, Paired, InsertBefore, AMDGPU::OpName::vdata);
+ copyFromSrcRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::vdata);
auto MIB = BuildMI(*MBB, InsertBefore, DL, TII->get(Opcode))
.addReg(SrcReg, RegState::Kill);
@@ -1789,7 +1800,9 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeFlatLoadPair(
CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore) {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
+
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
const unsigned Opcode = getNewOpcode(CI, Paired);
@@ -1807,7 +1820,7 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeFlatLoadPair(
.addImm(CI.CPol)
.addMemOperand(combineKnownAdjacentMMOs(CI, Paired));
- copyToDestRegs(CI, Paired, InsertBefore, AMDGPU::OpName::vdst, DestReg);
+ copyToDestRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::vdst, DestReg);
CI.I->eraseFromParent();
Paired.I->eraseFromParent();
@@ -1818,12 +1831,14 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeFlatStorePair(
CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore) {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
+
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
const unsigned Opcode = getNewOpcode(CI, Paired);
Register SrcReg =
- copyFromSrcRegs(CI, Paired, InsertBefore, AMDGPU::OpName::vdata);
+ copyFromSrcRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::vdata);
auto MIB = BuildMI(*MBB, InsertBefore, DL, TII->get(Opcode))
.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr))
@@ -2094,12 +2109,13 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeBufferStorePair(
CombineInfo &CI, CombineInfo &Paired,
MachineBasicBlock::iterator InsertBefore) {
MachineBasicBlock *MBB = CI.I->getParent();
- DebugLoc DL = CI.I->getDebugLoc();
+ DebugLoc DL =
+ DebugLoc::getMergedLocation(CI.I->getDebugLoc(), Paired.I->getDebugLoc());
const unsigned Opcode = getNewOpcode(CI, Paired);
Register SrcReg =
- copyFromSrcRegs(CI, Paired, InsertBefore, AMDGPU::OpName::vdata);
+ copyFromSrcRegs(CI, Paired, InsertBefore, DL, AMDGPU::OpName::vdata);
auto MIB = BuildMI(*MBB, InsertBefore, DL, TII->get(Opcode))
.addReg(SrcReg, RegState::Kill);
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 54f57e0..85adcab 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -513,6 +513,13 @@ defm V_CVT_U16_F16 : VOP1Inst_t16_with_profiles <"v_cvt_u16_f16",
defm V_CVT_I16_F16 : VOP1Inst_t16_with_profiles <"v_cvt_i16_f16",
VOP_I16_F16_SPECIAL_OMOD, VOP_I16_F16_SPECIAL_OMOD_t16, VOP_I16_F16_SPECIAL_OMOD_fake16, fp_to_sint>;
+
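+// The BF16 transcendental profiles below disable clamp and output
+// modifiers (HasClamp = 0, HasOMod = 0).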
+let HasClamp = 0, HasOMod = 0 in {
+def V_TRANS_BF16_Profile : VOPProfile <[bf16, bf16, untyped, untyped]>;
+def V_TRANS_BF16_t16_Profile : VOPProfile_True16 <VOP_BF16_BF16>;
+def V_TRANS_BF16_fake16_Profile : VOPProfile_Fake16 <VOP_BF16_BF16>;
+}
+
let TRANS = 1, SchedRW = [WriteTrans32] in {
defm V_RCP_F16 : VOP1Inst_t16 <"v_rcp_f16", VOP_F16_F16, AMDGPUrcp>;
defm V_SQRT_F16 : VOP1Inst_t16 <"v_sqrt_f16", VOP_F16_F16, any_amdgcn_sqrt>;
@@ -527,14 +534,30 @@ defm V_TANH_F16 : VOP1Inst_t16 <"v_tanh_f16", VOP_F16_F16, int_amdgcn_tanh>;
}
let SubtargetPredicate = HasBF16TransInsts in {
-defm V_TANH_BF16 : VOP1Inst_t16 <"v_tanh_bf16", VOP_BF16_BF16, int_amdgcn_tanh>;
-defm V_RCP_BF16 : VOP1Inst_t16 <"v_rcp_bf16", VOP_BF16_BF16, AMDGPUrcp>;
-defm V_SQRT_BF16 : VOP1Inst_t16 <"v_sqrt_bf16", VOP_BF16_BF16, any_amdgcn_sqrt>;
-defm V_RSQ_BF16 : VOP1Inst_t16 <"v_rsq_bf16", VOP_BF16_BF16, AMDGPUrsq>;
-defm V_LOG_BF16 : VOP1Inst_t16 <"v_log_bf16", VOP_BF16_BF16, AMDGPUlogf16>;
-defm V_EXP_BF16 : VOP1Inst_t16 <"v_exp_bf16", VOP_BF16_BF16, AMDGPUexpf16>;
-defm V_SIN_BF16 : VOP1Inst_t16 <"v_sin_bf16", VOP_BF16_BF16, AMDGPUsin>;
-defm V_COS_BF16 : VOP1Inst_t16 <"v_cos_bf16", VOP_BF16_BF16, AMDGPUcos>;
+defm V_TANH_BF16 : VOP1Inst_t16_with_profiles<"v_tanh_bf16", V_TRANS_BF16_Profile,
+ V_TRANS_BF16_t16_Profile, V_TRANS_BF16_fake16_Profile,
+ int_amdgcn_tanh>;
+defm V_RCP_BF16 : VOP1Inst_t16_with_profiles<"v_rcp_bf16", V_TRANS_BF16_Profile,
+ V_TRANS_BF16_t16_Profile, V_TRANS_BF16_fake16_Profile,
+ AMDGPUrcp>;
+defm V_SQRT_BF16 : VOP1Inst_t16_with_profiles<"v_sqrt_bf16", V_TRANS_BF16_Profile,
+ V_TRANS_BF16_t16_Profile, V_TRANS_BF16_fake16_Profile,
+ any_amdgcn_sqrt>;
+defm V_RSQ_BF16 : VOP1Inst_t16_with_profiles<"v_rsq_bf16", V_TRANS_BF16_Profile,
+ V_TRANS_BF16_t16_Profile, V_TRANS_BF16_fake16_Profile,
+ AMDGPUrsq>;
+defm V_LOG_BF16 : VOP1Inst_t16_with_profiles<"v_log_bf16", V_TRANS_BF16_Profile,
+ V_TRANS_BF16_t16_Profile, V_TRANS_BF16_fake16_Profile,
+ AMDGPUlogf16>;
+defm V_EXP_BF16 : VOP1Inst_t16_with_profiles<"v_exp_bf16", V_TRANS_BF16_Profile,
+ V_TRANS_BF16_t16_Profile, V_TRANS_BF16_fake16_Profile,
+ AMDGPUexpf16>;
+defm V_SIN_BF16 : VOP1Inst_t16_with_profiles<"v_sin_bf16", V_TRANS_BF16_Profile,
+ V_TRANS_BF16_t16_Profile, V_TRANS_BF16_fake16_Profile,
+ AMDGPUsin>;
+defm V_COS_BF16 : VOP1Inst_t16_with_profiles<"v_cos_bf16", V_TRANS_BF16_Profile,
+ V_TRANS_BF16_t16_Profile, V_TRANS_BF16_fake16_Profile,
+ AMDGPUcos>;
}
} // End TRANS = 1, SchedRW = [WriteTrans32]
defm V_FREXP_MANT_F16 : VOP1Inst_t16 <"v_frexp_mant_f16", VOP_F16_F16, int_amdgcn_frexp_mant>;
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index 8325c62..6df91a1 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -1357,8 +1357,12 @@ class VOPBinOpClampPat<SDPatternOperator node, Instruction inst, ValueType vt> :
class getVOP3ModPat<VOPProfile P, SDPatternOperator node> {
dag src0 = !if(P.HasOMod,
- (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
- (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp));
+ !if(P.HasClamp,
+ (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
+ (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i32:$omod)),
+ !if(P.HasClamp,
+ (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp),
+ (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers)));
list<dag> ret3 = [(set P.DstVT:$vdst,
(DivergentFragOrOp<node, P>.ret (P.Src0VT src0),
diff --git a/llvm/lib/Target/ARC/ARCInstrInfo.cpp b/llvm/lib/Target/ARC/ARCInstrInfo.cpp
index 05bcb35..2dec6ff 100644
--- a/llvm/lib/Target/ARC/ARCInstrInfo.cpp
+++ b/llvm/lib/Target/ARC/ARCInstrInfo.cpp
@@ -44,7 +44,8 @@ enum TSFlagsConstants {
void ARCInstrInfo::anchor() {}
ARCInstrInfo::ARCInstrInfo(const ARCSubtarget &ST)
- : ARCGenInstrInfo(ST, ARC::ADJCALLSTACKDOWN, ARC::ADJCALLSTACKUP), RI(ST) {}
+ : ARCGenInstrInfo(ST, RI, ARC::ADJCALLSTACKDOWN, ARC::ADJCALLSTACKUP),
+ RI(ST) {}
static bool isZeroImm(const MachineOperand &Op) {
return Op.isImm() && Op.getImm() == 0;
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 22769db..b466ca6f 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -107,8 +107,9 @@ static const ARM_MLxEntry ARM_MLxTable[] = {
{ ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq, false, true },
};
-ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI)
- : ARMGenInstrInfo(STI, ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
+ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI,
+ const ARMBaseRegisterInfo &TRI)
+ : ARMGenInstrInfo(STI, TRI, ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
Subtarget(STI) {
for (unsigned i = 0, e = std::size(ARM_MLxTable); i != e; ++i) {
if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
index 2869e7f..27f8e3b 100644
--- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.h
@@ -44,7 +44,8 @@ class ARMBaseInstrInfo : public ARMGenInstrInfo {
protected:
// Can be only subclassed.
- explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
+ explicit ARMBaseInstrInfo(const ARMSubtarget &STI,
+ const ARMBaseRegisterInfo &TRI);
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
unsigned LoadImmOpc, unsigned LoadOpc) const;
@@ -125,7 +126,11 @@ public:
// if there is not such an opcode.
virtual unsigned getUnindexedOpcode(unsigned Opc) const = 0;
- virtual const ARMBaseRegisterInfo &getRegisterInfo() const = 0;
+ const ARMBaseRegisterInfo &getRegisterInfo() const {
+ return static_cast<const ARMBaseRegisterInfo &>(
+ TargetInstrInfo::getRegisterInfo());
+ }
+
const ARMSubtarget &getSubtarget() const { return Subtarget; }
ScheduleHazardRecognizer *
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.cpp b/llvm/lib/Target/ARM/ARMInstrInfo.cpp
index c684de7..f370547 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.cpp
@@ -25,7 +25,8 @@
#include "llvm/MC/MCInst.h"
using namespace llvm;
-ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI) : ARMBaseInstrInfo(STI) {}
+ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
+ : ARMBaseInstrInfo(STI, RI) {}
/// Return the noop instruction to use for a noop.
MCInst ARMInstrInfo::getNop() const {
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.h b/llvm/lib/Target/ARM/ARMInstrInfo.h
index 178d7a2..9feaf14 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.h
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.h
@@ -35,7 +35,7 @@ public:
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- const ARMRegisterInfo &getRegisterInfo() const override { return RI; }
+ const ARMRegisterInfo &getRegisterInfo() const { return RI; }
private:
void expandLoadStackGuard(MachineBasicBlock::iterator MI) const override;
diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h
index 4a0883c..34baa31 100644
--- a/llvm/lib/Target/ARM/ARMSubtarget.h
+++ b/llvm/lib/Target/ARM/ARMSubtarget.h
@@ -377,6 +377,7 @@ public:
bool isRWPI() const;
bool useMachineScheduler() const { return UseMISched; }
+ bool enableTerminalRule() const override { return true; }
bool useMachinePipeliner() const { return UseMIPipeliner; }
bool hasMinSize() const { return OptMinSize; }
bool isThumb1Only() const { return isThumb() && !hasThumb2(); }
diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
index 4b8c2fd..f95ba6a4 100644
--- a/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.cpp
@@ -24,7 +24,7 @@
using namespace llvm;
Thumb1InstrInfo::Thumb1InstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI(STI) {}
+ : ARMBaseInstrInfo(STI, RI), RI(STI) {}
/// Return the noop instruction to use for a noop.
MCInst Thumb1InstrInfo::getNop() const {
diff --git a/llvm/lib/Target/ARM/Thumb1InstrInfo.h b/llvm/lib/Target/ARM/Thumb1InstrInfo.h
index 68b326c..16350a6 100644
--- a/llvm/lib/Target/ARM/Thumb1InstrInfo.h
+++ b/llvm/lib/Target/ARM/Thumb1InstrInfo.h
@@ -35,7 +35,7 @@ public:
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- const ThumbRegisterInfo &getRegisterInfo() const override { return RI; }
+ const ThumbRegisterInfo &getRegisterInfo() const { return RI; }
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
const DebugLoc &DL, Register DestReg, Register SrcReg,
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
index f5653d4..b66e407 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp
@@ -46,7 +46,7 @@ PreferNoCSEL("prefer-no-csel", cl::Hidden,
cl::init(false));
Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
- : ARMBaseInstrInfo(STI), RI(STI) {}
+ : ARMBaseInstrInfo(STI, RI), RI(STI) {}
/// Return the noop instruction to use for a noop.
MCInst Thumb2InstrInfo::getNop() const {
diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.h b/llvm/lib/Target/ARM/Thumb2InstrInfo.h
index 1b0bf2d..59ef39d 100644
--- a/llvm/lib/Target/ARM/Thumb2InstrInfo.h
+++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.h
@@ -58,7 +58,7 @@ public:
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
///
- const ThumbRegisterInfo &getRegisterInfo() const override { return RI; }
+ const ThumbRegisterInfo &getRegisterInfo() const { return RI; }
MachineInstr *optimizeSelect(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &SeenMIs,
diff --git a/llvm/lib/Target/AVR/AVRInstrInfo.cpp b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
index ce99085..5e247cb 100644
--- a/llvm/lib/Target/AVR/AVRInstrInfo.cpp
+++ b/llvm/lib/Target/AVR/AVRInstrInfo.cpp
@@ -30,8 +30,8 @@
namespace llvm {
AVRInstrInfo::AVRInstrInfo(const AVRSubtarget &STI)
- : AVRGenInstrInfo(STI, AVR::ADJCALLSTACKDOWN, AVR::ADJCALLSTACKUP), RI(),
- STI(STI) {}
+ : AVRGenInstrInfo(STI, RI, AVR::ADJCALLSTACKDOWN, AVR::ADJCALLSTACKUP),
+ RI(), STI(STI) {}
void AVRInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
diff --git a/llvm/lib/Target/AVR/AVRTargetTransformInfo.h b/llvm/lib/Target/AVR/AVRTargetTransformInfo.h
index 0daeeb8..338a7c8 100644
--- a/llvm/lib/Target/AVR/AVRTargetTransformInfo.h
+++ b/llvm/lib/Target/AVR/AVRTargetTransformInfo.h
@@ -21,7 +21,6 @@
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/Function.h"
-#include <optional>
namespace llvm {
diff --git a/llvm/lib/Target/BPF/BPFInstrInfo.cpp b/llvm/lib/Target/BPF/BPFInstrInfo.cpp
index 409f8b4..0e56e65 100644
--- a/llvm/lib/Target/BPF/BPFInstrInfo.cpp
+++ b/llvm/lib/Target/BPF/BPFInstrInfo.cpp
@@ -27,7 +27,7 @@
using namespace llvm;
BPFInstrInfo::BPFInstrInfo(const BPFSubtarget &STI)
- : BPFGenInstrInfo(STI, BPF::ADJCALLSTACKDOWN, BPF::ADJCALLSTACKUP) {}
+ : BPFGenInstrInfo(STI, RI, BPF::ADJCALLSTACKDOWN, BPF::ADJCALLSTACKUP) {}
void BPFInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
diff --git a/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp b/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
index 7885d93..a2cf0a5 100644
--- a/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
+++ b/llvm/lib/Target/CSKY/CSKYConstantIslandPass.cpp
@@ -48,7 +48,6 @@
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
diff --git a/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp b/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
index 619a797..34a7de8 100644
--- a/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
+++ b/llvm/lib/Target/CSKY/CSKYInstrInfo.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
#include "CSKYGenInstrInfo.inc"
CSKYInstrInfo::CSKYInstrInfo(const CSKYSubtarget &STI)
- : CSKYGenInstrInfo(STI, CSKY::ADJCALLSTACKDOWN, CSKY::ADJCALLSTACKUP),
+ : CSKYGenInstrInfo(STI, RI, CSKY::ADJCALLSTACKDOWN, CSKY::ADJCALLSTACKUP),
STI(STI) {
v2sf = STI.hasFPUv2SingleFloat();
v2df = STI.hasFPUv2DoubleFloat();
diff --git a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp
index 677203d..95577dd 100644
--- a/llvm/lib/Target/DirectX/DXContainerGlobals.cpp
+++ b/llvm/lib/Target/DirectX/DXContainerGlobals.cpp
@@ -29,7 +29,6 @@
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <cstdint>
-#include <optional>
using namespace llvm;
using namespace llvm::dxil;
diff --git a/llvm/lib/Target/DirectX/DXILResourceAccess.cpp b/llvm/lib/Target/DirectX/DXILResourceAccess.cpp
index 6579d34..057d87b 100644
--- a/llvm/lib/Target/DirectX/DXILResourceAccess.cpp
+++ b/llvm/lib/Target/DirectX/DXILResourceAccess.cpp
@@ -10,6 +10,7 @@
#include "DirectX.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/DXILResource.h"
+#include "llvm/Frontend/HLSL/HLSLResource.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
@@ -20,6 +21,7 @@
#include "llvm/IR/IntrinsicsDirectX.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
+#include "llvm/Support/FormatVariadic.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#define DEBUG_TYPE "dxil-resource-access"
@@ -44,16 +46,28 @@ static Value *calculateGEPOffset(GetElementPtrInst *GEP, Value *PrevOffset,
APInt ConstantOffset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
if (GEP->accumulateConstantOffset(DL, ConstantOffset)) {
APInt Scaled = ConstantOffset.udiv(ScalarSize);
- return ConstantInt::get(Type::getInt32Ty(GEP->getContext()), Scaled);
+ return ConstantInt::get(DL.getIndexType(GEP->getType()), Scaled);
}
- auto IndexIt = GEP->idx_begin();
- assert(cast<ConstantInt>(IndexIt)->getZExtValue() == 0 &&
- "GEP is not indexing through pointer");
- ++IndexIt;
- Value *Offset = *IndexIt;
- assert(++IndexIt == GEP->idx_end() && "Too many indices in GEP");
- return Offset;
+ unsigned NumIndices = GEP->getNumIndices();
+
+ // If we have a single index, we're indexing into a top-level array. This
+ // generally only happens with cbuffers.
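+ // e.g. ``getelementptr float, ptr %base, i32 %idx''.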
+ if (NumIndices == 1)
+ return *GEP->idx_begin();
+
+ // If we have two indices, this should be a simple access through a pointer.
+ if (NumIndices == 2) {
+ auto IndexIt = GEP->idx_begin();
+ assert(cast<ConstantInt>(IndexIt)->getZExtValue() == 0 &&
+ "GEP is not indexing through pointer");
+ ++IndexIt;
+ Value *Offset = *IndexIt;
+ assert(++IndexIt == GEP->idx_end() && "Too many indices in GEP");
+ return Offset;
+ }
+
+ llvm_unreachable("Unhandled GEP structure for resource access");
}
static void createTypedBufferStore(IntrinsicInst *II, StoreInst *SI,
@@ -171,6 +185,127 @@ static void createRawLoad(IntrinsicInst *II, LoadInst *LI, Value *Offset) {
LI->replaceAllUsesWith(V);
}
+namespace {
+/// Helper for building a `load.cbufferrow` intrinsic given a simple type.
+struct CBufferRowIntrin {
+ Intrinsic::ID IID;
+ Type *RetTy;
+ unsigned int EltSize;
+ unsigned int NumElts;
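+ // A cbuffer row is 16 bytes, so EltSize * NumElts == 16 in each case
+ // below.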
+
+ CBufferRowIntrin(const DataLayout &DL, Type *Ty) {
+ assert(Ty == Ty->getScalarType() && "Expected scalar type");
+
+ switch (DL.getTypeSizeInBits(Ty)) {
+ case 16:
+ IID = Intrinsic::dx_resource_load_cbufferrow_8;
+ RetTy = StructType::get(Ty, Ty, Ty, Ty, Ty, Ty, Ty, Ty);
+ EltSize = 2;
+ NumElts = 8;
+ break;
+ case 32:
+ IID = Intrinsic::dx_resource_load_cbufferrow_4;
+ RetTy = StructType::get(Ty, Ty, Ty, Ty);
+ EltSize = 4;
+ NumElts = 4;
+ break;
+ case 64:
+ IID = Intrinsic::dx_resource_load_cbufferrow_2;
+ RetTy = StructType::get(Ty, Ty);
+ EltSize = 8;
+ NumElts = 2;
+ break;
+ default:
+ llvm_unreachable("Only 16, 32, and 64 bit types supported");
+ }
+ }
+};
+} // namespace
+
+static void createCBufferLoad(IntrinsicInst *II, LoadInst *LI, Value *Offset,
+ dxil::ResourceTypeInfo &RTI) {
+ const DataLayout &DL = LI->getDataLayout();
+
+ Type *Ty = LI->getType();
+ assert(!isa<StructType>(Ty) && "Structs not handled yet");
+ CBufferRowIntrin Intrin(DL, Ty->getScalarType());
+
+ StringRef Name = LI->getName();
+ Value *Handle = II->getOperand(0);
+
+ IRBuilder<> Builder(LI);
+
+ ConstantInt *GlobalOffset = dyn_cast<ConstantInt>(II->getOperand(1));
+ assert(GlobalOffset && "CBuffer getpointer index must be constant");
+
+ unsigned int FixedOffset = GlobalOffset->getZExtValue();
+ // If we have a further constant offset, we can just fold it into the fixed
+ // offset.
+ if (auto *ConstOffset = dyn_cast_if_present<ConstantInt>(Offset)) {
+ FixedOffset += ConstOffset->getZExtValue();
+ Offset = nullptr;
+ }
+
+ Value *CurrentRow = ConstantInt::get(
+ Builder.getInt32Ty(), FixedOffset / hlsl::CBufferRowSizeInBytes);
+ unsigned int CurrentIndex =
+ (FixedOffset % hlsl::CBufferRowSizeInBytes) / Intrin.EltSize;
+
+ assert(!(CurrentIndex && Offset) &&
+ "Dynamic indexing into elements of cbuffer rows is not supported");
+ // At this point, if we have a non-constant offset it has to be an array
+ // offset, so we can assume that it's a multiple of the row size.
+ if (Offset)
+ CurrentRow = FixedOffset ? Builder.CreateAdd(CurrentRow, Offset) : Offset;
+
+ auto *CBufLoad = Builder.CreateIntrinsic(
+ Intrin.RetTy, Intrin.IID, {Handle, CurrentRow}, nullptr, Name + ".load");
+ auto *Elt =
+ Builder.CreateExtractValue(CBufLoad, {CurrentIndex++}, Name + ".extract");
+
+ // At this point we've loaded the first scalar of our result, but our original
+ // type may have been a vector.
+ unsigned int Remaining =
+ ((DL.getTypeSizeInBits(Ty) / 8) / Intrin.EltSize) - 1;
+ if (Remaining == 0) {
+ // We only have a single element, so we're done.
+ Value *Result = Elt;
+
+ // However, if we loaded a <1 x T>, then we need to adjust the type.
+ if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
+ assert(VT->getNumElements() == 1 && "Can't have multiple elements here");
+ Result = Builder.CreateInsertElement(PoisonValue::get(VT), Result,
+ Builder.getInt32(0), Name);
+ }
+ LI->replaceAllUsesWith(Result);
+ return;
+ }
+
+ // Walk each element and extract it, wrapping to new rows as needed.
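+ // For example, a <3 x float> load at byte offset 8 extracts row0[2] and
+ // row0[3], then wraps to row1[0].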
+ SmallVector<Value *> Extracts{Elt};
+ while (Remaining--) {
+ CurrentIndex %= Intrin.NumElts;
+
+ if (CurrentIndex == 0) {
+ CurrentRow = Builder.CreateAdd(CurrentRow,
+ ConstantInt::get(Builder.getInt32Ty(), 1));
+ CBufLoad = Builder.CreateIntrinsic(Intrin.RetTy, Intrin.IID,
+ {Handle, CurrentRow}, nullptr,
+ Name + ".load");
+ }
+
+ Extracts.push_back(Builder.CreateExtractValue(CBufLoad, {CurrentIndex++},
+ Name + ".extract"));
+ }
+
+ // Finally, we build up the original loaded value.
+ Value *Result = PoisonValue::get(Ty);
+ for (int I = 0, E = Extracts.size(); I < E; ++I)
+ Result = Builder.CreateInsertElement(
+ Result, Extracts[I], Builder.getInt32(I), Name + formatv(".upto{}", I));
+ LI->replaceAllUsesWith(Result);
+}
+
static void createLoadIntrinsic(IntrinsicInst *II, LoadInst *LI, Value *Offset,
dxil::ResourceTypeInfo &RTI) {
switch (RTI.getResourceKind()) {
@@ -179,6 +314,8 @@ static void createLoadIntrinsic(IntrinsicInst *II, LoadInst *LI, Value *Offset,
case dxil::ResourceKind::RawBuffer:
case dxil::ResourceKind::StructuredBuffer:
return createRawLoad(II, LI, Offset);
+ case dxil::ResourceKind::CBuffer:
+ return createCBufferLoad(II, LI, Offset, RTI);
case dxil::ResourceKind::Texture1D:
case dxil::ResourceKind::Texture2D:
case dxil::ResourceKind::Texture2DMS:
@@ -190,9 +327,8 @@ static void createLoadIntrinsic(IntrinsicInst *II, LoadInst *LI, Value *Offset,
case dxil::ResourceKind::TextureCubeArray:
case dxil::ResourceKind::FeedbackTexture2D:
case dxil::ResourceKind::FeedbackTexture2DArray:
- case dxil::ResourceKind::CBuffer:
case dxil::ResourceKind::TBuffer:
- // TODO: handle these
+ reportFatalUsageError("Load not yet implemented for resource type");
return;
case dxil::ResourceKind::Sampler:
case dxil::ResourceKind::RTAccelerationStructure:
diff --git a/llvm/lib/Target/DirectX/DXILRootSignature.h b/llvm/lib/Target/DirectX/DXILRootSignature.h
index b990b6c..ec82aa9 100644
--- a/llvm/lib/Target/DirectX/DXILRootSignature.h
+++ b/llvm/lib/Target/DirectX/DXILRootSignature.h
@@ -21,7 +21,6 @@
#include "llvm/IR/PassManager.h"
#include "llvm/MC/DXContainerRootSignature.h"
#include "llvm/Pass.h"
-#include <optional>
namespace llvm {
namespace dxil {
diff --git a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h
index f2c00c7..7cbc092 100644
--- a/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h
+++ b/llvm/lib/Target/DirectX/DXILWriter/DXILBitcodeWriter.h
@@ -19,7 +19,6 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/MemoryBufferRef.h"
#include <memory>
-#include <string>
#include <vector>
namespace llvm {
diff --git a/llvm/lib/Target/DirectX/DirectXInstrInfo.cpp b/llvm/lib/Target/DirectX/DirectXInstrInfo.cpp
index bb2efa4..401881d 100644
--- a/llvm/lib/Target/DirectX/DirectXInstrInfo.cpp
+++ b/llvm/lib/Target/DirectX/DirectXInstrInfo.cpp
@@ -19,6 +19,6 @@
using namespace llvm;
DirectXInstrInfo::DirectXInstrInfo(const DirectXSubtarget &STI)
- : DirectXGenInstrInfo(STI) {}
+ : DirectXGenInstrInfo(STI, RI) {}
DirectXInstrInfo::~DirectXInstrInfo() {}
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 55bafde..dd9f2fa 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -118,9 +118,9 @@ const int Hexagon_ADDI_OFFSET_MIN = -32768;
void HexagonInstrInfo::anchor() {}
HexagonInstrInfo::HexagonInstrInfo(const HexagonSubtarget &ST)
- : HexagonGenInstrInfo(ST, Hexagon::ADJCALLSTACKDOWN,
+ : HexagonGenInstrInfo(ST, RegInfo, Hexagon::ADJCALLSTACKDOWN,
Hexagon::ADJCALLSTACKUP),
- Subtarget(ST) {}
+ RegInfo(ST.getHwMode()), Subtarget(ST) {}
namespace llvm {
namespace HexagonFUnits {
diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
index 48adf82..7a0c77c 100644
--- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -23,6 +23,8 @@
#include <cstdint>
#include <vector>
+#include "HexagonRegisterInfo.h"
+
#define GET_INSTRINFO_HEADER
#include "HexagonGenInstrInfo.inc"
@@ -36,6 +38,7 @@ class MachineOperand;
class TargetRegisterInfo;
class HexagonInstrInfo : public HexagonGenInstrInfo {
+ const HexagonRegisterInfo RegInfo;
const HexagonSubtarget &Subtarget;
enum BundleAttribute {
@@ -47,6 +50,8 @@ class HexagonInstrInfo : public HexagonGenInstrInfo {
public:
explicit HexagonInstrInfo(const HexagonSubtarget &ST);
+ const HexagonRegisterInfo &getRegisterInfo() const { return RegInfo; }
+
/// TargetInstrInfo overrides.
/// If the specified machine instruction is a direct
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
index a3c8a88..66c8b0a 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp
@@ -76,8 +76,7 @@ HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
OptLevel(TM.getOptLevel()),
CPUString(std::string(Hexagon_MC::selectHexagonCPU(CPU))),
TargetTriple(TT), InstrInfo(initializeSubtargetDependencies(CPU, FS)),
- RegInfo(getHwMode()), TLInfo(TM, *this),
- InstrItins(getInstrItineraryForCPU(CPUString)) {
+ TLInfo(TM, *this), InstrItins(getInstrItineraryForCPU(CPUString)) {
Hexagon_MC::addArchSubtarget(this, FS);
// Beware of the default constructor of InstrItineraryData: it will
// reset all members to 0.
diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.h b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
index 995f66d..30794f6 100644
--- a/llvm/lib/Target/Hexagon/HexagonSubtarget.h
+++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.h
@@ -100,7 +100,6 @@ private:
// The following objects can use the TargetTriple, so they must be
// declared after it.
HexagonInstrInfo InstrInfo;
- HexagonRegisterInfo RegInfo;
HexagonTargetLowering TLInfo;
HexagonSelectionDAGInfo TSInfo;
HexagonFrameLowering FrameLowering;
@@ -122,7 +121,7 @@ public:
}
const HexagonInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const HexagonRegisterInfo *getRegisterInfo() const override {
- return &RegInfo;
+ return &InstrInfo.getRegisterInfo();
}
const HexagonTargetLowering *getTargetLowering() const override {
return &TLInfo;
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
index 02ed1001..b3d2856 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -27,7 +27,8 @@ using namespace llvm;
#include "LanaiGenInstrInfo.inc"
LanaiInstrInfo::LanaiInstrInfo(const LanaiSubtarget &STI)
- : LanaiGenInstrInfo(STI, Lanai::ADJCALLSTACKDOWN, Lanai::ADJCALLSTACKUP),
+ : LanaiGenInstrInfo(STI, RegisterInfo, Lanai::ADJCALLSTACKDOWN,
+ Lanai::ADJCALLSTACKUP),
RegisterInfo() {}
void LanaiInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
index 9a35df2..5eb3bd6 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.cpp
@@ -26,9 +26,9 @@ using namespace llvm;
#include "LoongArchGenInstrInfo.inc"
LoongArchInstrInfo::LoongArchInstrInfo(const LoongArchSubtarget &STI)
- : LoongArchGenInstrInfo(STI, LoongArch::ADJCALLSTACKDOWN,
+ : LoongArchGenInstrInfo(STI, RegInfo, LoongArch::ADJCALLSTACKDOWN,
LoongArch::ADJCALLSTACKUP),
- STI(STI) {}
+ RegInfo(STI.getHwMode()), STI(STI) {}
MCInst LoongArchInstrInfo::getNop() const {
return MCInstBuilder(LoongArch::ANDI)
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
index e61314c..d43d229 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.h
@@ -24,9 +24,13 @@ namespace llvm {
class LoongArchSubtarget;
class LoongArchInstrInfo : public LoongArchGenInstrInfo {
+ const LoongArchRegisterInfo RegInfo;
+
public:
explicit LoongArchInstrInfo(const LoongArchSubtarget &STI);
+ const LoongArchRegisterInfo &getRegisterInfo() const { return RegInfo; }
+
MCInst getNop() const override;
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
diff --git a/llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp b/llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp
index 3acbe49..76a8ba1 100644
--- a/llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchSubtarget.cpp
@@ -95,4 +95,4 @@ LoongArchSubtarget::LoongArchSubtarget(const Triple &TT, StringRef CPU,
: LoongArchGenSubtargetInfo(TT, CPU, TuneCPU, FS),
FrameLowering(
initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
- InstrInfo(*this), RegInfo(getHwMode()), TLInfo(TM, *this) {}
+ InstrInfo(*this), TLInfo(TM, *this) {}
diff --git a/llvm/lib/Target/LoongArch/LoongArchSubtarget.h b/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
index 5e12baf..2beff07 100644
--- a/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
+++ b/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
@@ -45,7 +45,6 @@ class LoongArchSubtarget : public LoongArchGenSubtargetInfo {
LoongArchABI::ABI TargetABI = LoongArchABI::ABI_Unknown;
LoongArchFrameLowering FrameLowering;
LoongArchInstrInfo InstrInfo;
- LoongArchRegisterInfo RegInfo;
LoongArchTargetLowering TLInfo;
SelectionDAGTargetInfo TSInfo;
@@ -78,7 +77,7 @@ public:
}
const LoongArchInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const LoongArchRegisterInfo *getRegisterInfo() const override {
- return &RegInfo;
+ return &InstrInfo.getRegisterInfo();
}
const LoongArchTargetLowering *getTargetLowering() const override {
return &TLInfo;
diff --git a/llvm/lib/Target/M68k/M68kSubtarget.h b/llvm/lib/Target/M68k/M68kSubtarget.h
index 16ca7d2..4f96858 100644
--- a/llvm/lib/Target/M68k/M68kSubtarget.h
+++ b/llvm/lib/Target/M68k/M68kSubtarget.h
@@ -27,8 +27,6 @@
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/Alignment.h"
-#include <string>
-
#define GET_SUBTARGETINFO_HEADER
#include "M68kGenSubtargetInfo.inc"
diff --git a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
index 65b4820..af053b8 100644
--- a/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
+++ b/llvm/lib/Target/MSP430/MSP430InstrInfo.cpp
@@ -26,7 +26,8 @@ using namespace llvm;
void MSP430InstrInfo::anchor() {}
MSP430InstrInfo::MSP430InstrInfo(const MSP430Subtarget &STI)
- : MSP430GenInstrInfo(STI, MSP430::ADJCALLSTACKDOWN, MSP430::ADJCALLSTACKUP),
+ : MSP430GenInstrInfo(STI, RI, MSP430::ADJCALLSTACKDOWN,
+ MSP430::ADJCALLSTACKUP),
RI() {}
void MSP430InstrInfo::storeRegToStackSlot(
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
index aa94f54..69b96cf 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.cpp
@@ -37,11 +37,7 @@ using namespace llvm;
#define DEBUG_TYPE "mips16-instrinfo"
Mips16InstrInfo::Mips16InstrInfo(const MipsSubtarget &STI)
- : MipsInstrInfo(STI, Mips::Bimm16), RI(STI) {}
-
-const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const {
- return RI;
-}
+ : MipsInstrInfo(STI, RI, Mips::Bimm16), RI(STI) {}
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
diff --git a/llvm/lib/Target/Mips/Mips16InstrInfo.h b/llvm/lib/Target/Mips/Mips16InstrInfo.h
index 1058e8c..2834fd3 100644
--- a/llvm/lib/Target/Mips/Mips16InstrInfo.h
+++ b/llvm/lib/Target/Mips/Mips16InstrInfo.h
@@ -30,7 +30,7 @@ class Mips16InstrInfo : public MipsInstrInfo {
public:
explicit Mips16InstrInfo(const MipsSubtarget &STI);
- const MipsRegisterInfo &getRegisterInfo() const override;
+ const Mips16RegisterInfo &getRegisterInfo() const { return RI; }
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
index bffdffa..c879c46 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -39,8 +39,9 @@ using namespace llvm;
// Pin the vtable to this file.
void MipsInstrInfo::anchor() {}
-MipsInstrInfo::MipsInstrInfo(const MipsSubtarget &STI, unsigned UncondBr)
- : MipsGenInstrInfo(STI, Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
+MipsInstrInfo::MipsInstrInfo(const MipsSubtarget &STI,
+ const MipsRegisterInfo &RI, unsigned UncondBr)
+ : MipsGenInstrInfo(STI, RI, Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
Subtarget(STI), UncondBrOpc(UncondBr) {}
const MipsInstrInfo *MipsInstrInfo::create(MipsSubtarget &STI) {
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.h b/llvm/lib/Target/Mips/MipsInstrInfo.h
index 2337ae7..fc94248 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -55,7 +55,8 @@ public:
BT_Indirect // One indirect branch.
};
- explicit MipsInstrInfo(const MipsSubtarget &STI, unsigned UncondBrOpc);
+ explicit MipsInstrInfo(const MipsSubtarget &STI, const MipsRegisterInfo &RI,
+ unsigned UncondBrOpc);
MCInst getNop() const override;
@@ -130,7 +131,10 @@ public:
/// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
/// such, whenever a client has an instance of instruction info, it should
/// always be able to get register info as well (through this method).
- virtual const MipsRegisterInfo &getRegisterInfo() const = 0;
+ const MipsRegisterInfo &getRegisterInfo() const {
+ return static_cast<const MipsRegisterInfo &>(
+ TargetInstrInfo::getRegisterInfo());
+ }
virtual unsigned getOppositeBranchOpc(unsigned Opc) const = 0;
diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
index dbdbb17..517f489 100644
--- a/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -28,11 +28,7 @@ static unsigned getUnconditionalBranch(const MipsSubtarget &STI) {
}
MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI)
- : MipsInstrInfo(STI, getUnconditionalBranch(STI)), RI(STI) {}
-
-const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
- return RI;
-}
+ : MipsInstrInfo(STI, RI, getUnconditionalBranch(STI)), RI(STI) {}
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
diff --git a/llvm/lib/Target/Mips/MipsSEInstrInfo.h b/llvm/lib/Target/Mips/MipsSEInstrInfo.h
index 2b4f55d..0a7a0e5 100644
--- a/llvm/lib/Target/Mips/MipsSEInstrInfo.h
+++ b/llvm/lib/Target/Mips/MipsSEInstrInfo.h
@@ -24,7 +24,7 @@ class MipsSEInstrInfo : public MipsInstrInfo {
public:
explicit MipsSEInstrInfo(const MipsSubtarget &STI);
- const MipsRegisterInfo &getRegisterInfo() const override;
+ const MipsSERegisterInfo &getRegisterInfo() const { return RI; }
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index 6840c7a..db2d96f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
void NVPTXInstrInfo::anchor() {}
NVPTXInstrInfo::NVPTXInstrInfo(const NVPTXSubtarget &STI)
- : NVPTXGenInstrInfo(STI), RegInfo() {}
+ : NVPTXGenInstrInfo(STI, RegInfo), RegInfo() {}
void NVPTXInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 3014aa6..8d9d4c7 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -89,7 +89,7 @@ static cl::opt<bool> EnableFMARegPressureReduction(
void PPCInstrInfo::anchor() {}
PPCInstrInfo::PPCInstrInfo(const PPCSubtarget &STI)
- : PPCGenInstrInfo(STI, PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP,
+ : PPCGenInstrInfo(STI, RI, PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP,
/* CatchRetOpcode */ -1,
STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
Subtarget(STI), RI(STI.getTargetMachine()) {}
diff --git a/llvm/lib/Target/PowerPC/PPCSubtarget.h b/llvm/lib/Target/PowerPC/PPCSubtarget.h
index f275802..7d93358 100644
--- a/llvm/lib/Target/PowerPC/PPCSubtarget.h
+++ b/llvm/lib/Target/PowerPC/PPCSubtarget.h
@@ -23,7 +23,6 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/TargetParser/Triple.h"
-#include <string>
#define GET_SUBTARGETINFO_HEADER
#include "PPCGenSubtargetInfo.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 5b72334..0b964c4 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -956,6 +956,9 @@ def FeatureStdExtSsdbltrp
def FeatureStdExtSmepmp
: RISCVExtension<1, 0, "Enhanced Physical Memory Protection">;
+def FeatureStdExtSmpmpmt
+ : RISCVExperimentalExtension<0, 6, "PMP-based Memory Types Extension">;
+
def FeatureStdExtSmrnmi
: RISCVExtension<1, 0, "Resumable Non-Maskable Interrupts">;
def HasStdExtSmrnmi : Predicate<"Subtarget->hasStdExtSmrnmi()">,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index b05956b..ce8dd3b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -82,8 +82,9 @@ namespace llvm::RISCV {
} // end namespace llvm::RISCV
RISCVInstrInfo::RISCVInstrInfo(const RISCVSubtarget &STI)
- : RISCVGenInstrInfo(STI, RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
- STI(STI) {}
+ : RISCVGenInstrInfo(STI, RegInfo, RISCV::ADJCALLSTACKDOWN,
+ RISCV::ADJCALLSTACKUP),
+ RegInfo(STI.getHwMode()), STI(STI) {}
#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.h b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
index c5eddb9..800af26 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.h
@@ -79,10 +79,13 @@ enum RISCVMachineCombinerPattern : unsigned {
};
class RISCVInstrInfo : public RISCVGenInstrInfo {
+ const RISCVRegisterInfo RegInfo;
public:
explicit RISCVInstrInfo(const RISCVSubtarget &STI);
+ const RISCVRegisterInfo &getRegisterInfo() const { return RegInfo; }
+
MCInst getNop() const override;
Register isLoadFromStackSlot(const MachineInstr &MI,
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index 715ac4c..3b43be3 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -104,7 +104,7 @@ RISCVSubtarget::RISCVSubtarget(const Triple &TT, StringRef CPU,
RVVVectorBitsMin(RVVVectorBitsMin), RVVVectorBitsMax(RVVVectorBitsMax),
FrameLowering(
initializeSubtargetDependencies(TT, CPU, TuneCPU, FS, ABIName)),
- InstrInfo(*this), RegInfo(getHwMode()), TLInfo(TM, *this) {
+ InstrInfo(*this), TLInfo(TM, *this) {
TSInfo = std::make_unique<RISCVSelectionDAGInfo>();
}
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 4b4fc8f..d5ffa6c 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -112,7 +112,6 @@ private:
RISCVFrameLowering FrameLowering;
RISCVInstrInfo InstrInfo;
- RISCVRegisterInfo RegInfo;
RISCVTargetLowering TLInfo;
/// Initializes using the passed in CPU and feature strings so that we can
@@ -140,7 +139,7 @@ public:
}
const RISCVInstrInfo *getInstrInfo() const override { return &InstrInfo; }
const RISCVRegisterInfo *getRegisterInfo() const override {
- return &RegInfo;
+ return &InstrInfo.getRegisterInfo();
}
const RISCVTargetLowering *getTargetLowering() const override {
return &TLInfo;
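For readers skimming the per-target churn: the pattern this commit applies everywhere is that each target's RegisterInfo instance now lives inside its InstrInfo, which hands a reference to the generated TargetInstrInfo base, while the subtarget merely forwards its accessor. A minimal sketch of the shape (the Foo* names are placeholders, not real LLVM classes):

    // Sketch only. Passing RegInfo to the base before RegInfo is constructed
    // is fine as long as the base merely binds a reference and does not use
    // it during construction, which this commit appears to rely on.
    class FooInstrInfo : public FooGenInstrInfo {
      const FooRegisterInfo RegInfo; // owned here, no longer by the subtarget
    public:
      explicit FooInstrInfo(const FooSubtarget &STI)
          : FooGenInstrInfo(STI, RegInfo), RegInfo(/*target args*/) {}
      const FooRegisterInfo &getRegisterInfo() const { return RegInfo; }
    };
    class FooSubtarget : public FooGenSubtargetInfo {
      FooInstrInfo InstrInfo;
    public:
      const FooRegisterInfo *getRegisterInfo() const {
        return &InstrInfo.getRegisterInfo(); // forwards instead of owning
      }
    };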
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 332433b..3d8eb40 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -1683,7 +1683,8 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
!TypeSize::isKnownLE(DL.getTypeSizeInBits(Src),
SrcLT.second.getSizeInBits()) ||
!TypeSize::isKnownLE(DL.getTypeSizeInBits(Dst),
- DstLT.second.getSizeInBits()))
+ DstLT.second.getSizeInBits()) ||
+ SrcLT.first > 1 || DstLT.first > 1)
return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);
// The split cost is handled by the base getCastInstrCost
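For context: getTypeLegalizationCost returns a (count, MVT) pair whose first element is how many legal-type pieces the IR type is legalized into, so a first element greater than 1 means the type must be split. A small sketch of the guard under that reading (variable names taken from the hunk):

    // SrcLT/DstLT are std::pair<InstructionCost, MVT>; .first > 1 indicates
    // the type is split during legalization (e.g. <512 x i2> on zve64x in
    // the new test added below), so defer to the base implementation, which
    // accounts for the split cost.
    auto SrcLT = getTypeLegalizationCost(Src);
    auto DstLT = getTypeLegalizationCost(Dst);
    if (SrcLT.first > 1 || DstLT.first > 1)
      return BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I);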
diff --git a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
index 0175f2f..970b83d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
@@ -612,13 +612,10 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
// Collect the SPIRVTypes for fp16, fp32, and fp64 and the constant of
// type int32 with 0 value to represent the FP Fast Math Mode.
std::vector<const MachineInstr *> SPIRVFloatTypes;
- const MachineInstr *ConstZero = nullptr;
+ const MachineInstr *ConstZeroInt32 = nullptr;
for (const MachineInstr *MI :
MAI->getMSInstrs(SPIRV::MB_TypeConstVars)) {
- // Skip if the instruction is not OpTypeFloat or OpConstant.
unsigned OpCode = MI->getOpcode();
- if (OpCode != SPIRV::OpTypeFloat && OpCode != SPIRV::OpConstantNull)
- continue;
// Collect the SPIRV type if it's a float.
if (OpCode == SPIRV::OpTypeFloat) {
@@ -629,14 +626,18 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
continue;
}
SPIRVFloatTypes.push_back(MI);
- } else {
+ continue;
+ }
+
+ if (OpCode == SPIRV::OpConstantNull) {
// Check if the constant is int32, if not skip it.
const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
MachineInstr *TypeMI = MRI.getVRegDef(MI->getOperand(1).getReg());
- if (!TypeMI || TypeMI->getOperand(1).getImm() != 32)
- continue;
-
- ConstZero = MI;
+ bool IsInt32Ty = TypeMI &&
+ TypeMI->getOpcode() == SPIRV::OpTypeInt &&
+ TypeMI->getOperand(1).getImm() == 32;
+ if (IsInt32Ty)
+ ConstZeroInt32 = MI;
}
}
@@ -657,9 +658,9 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
MCRegister TypeReg =
MAI->getRegisterAlias(MF, MI->getOperand(0).getReg());
Inst.addOperand(MCOperand::createReg(TypeReg));
- assert(ConstZero && "There should be a constant zero.");
+ assert(ConstZeroInt32 && "There should be a constant zero.");
MCRegister ConstReg = MAI->getRegisterAlias(
- ConstZero->getMF(), ConstZero->getOperand(0).getReg());
+ ConstZeroInt32->getMF(), ConstZeroInt32->getOperand(0).getReg());
Inst.addOperand(MCOperand::createReg(ConstReg));
outputMCInst(Inst);
}
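Beyond the rename, the rewrite tightens a check, as far as the hunk shows: the old code compared operand 1 of the type-defining instruction against 32 without confirming its opcode, so any type whose operand 1 happened to be 32 could have been accepted. The new predicate requires both:

    // Hedged restatement of the new check; TypeMI defines the type of the
    // OpConstantNull being inspected.
    bool IsInt32Ty = TypeMI &&
                     TypeMI->getOpcode() == SPIRV::OpTypeInt && // now required
                     TypeMI->getOperand(1).getImm() == 32;      // bit width
    if (IsInt32Ty)
      ConstZeroInt32 = MI;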
diff --git a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
index ba95ad8..4f8bf43 100644
--- a/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVInstrInfo.cpp
@@ -24,7 +24,7 @@
using namespace llvm;
SPIRVInstrInfo::SPIRVInstrInfo(const SPIRVSubtarget &STI)
- : SPIRVGenInstrInfo(STI) {}
+ : SPIRVGenInstrInfo(STI, RI) {}
bool SPIRVInstrInfo::isConstantInstr(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
index fbb127d..b8cd9c1 100644
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -249,17 +249,18 @@ static InstrSignature instrToSignature(const MachineInstr &MI,
InstrSignature Signature{MI.getOpcode()};
for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
// The only decorations that can be applied more than once to a given <id>
- // or structure member are UserSemantic(5635), CacheControlLoadINTEL (6442),
- // and CacheControlStoreINTEL (6443). For all the rest of decorations, we
- // will only add to the signature the Opcode, the id to which it applies,
- // and the decoration id, disregarding any decoration flags. This will
- // ensure that any subsequent decoration with the same id will be deemed as
- // a duplicate. Then, at the call site, we will be able to handle duplicates
- // in the best way.
+ // or structure member are FuncParamAttr (38), UserSemantic (5635),
+ // CacheControlLoadINTEL (6442), and CacheControlStoreINTEL (6443). For all
+ // the rest of decorations, we will only add to the signature the Opcode,
+ // the id to which it applies, and the decoration id, disregarding any
+ // decoration flags. This will ensure that any subsequent decoration with
+ // the same id will be deemed as a duplicate. Then, at the call site, we
+ // will be able to handle duplicates in the best way.
unsigned Opcode = MI.getOpcode();
if ((Opcode == SPIRV::OpDecorate) && i >= 2) {
unsigned DecorationID = MI.getOperand(1).getImm();
- if (DecorationID != SPIRV::Decoration::UserSemantic &&
+ if (DecorationID != SPIRV::Decoration::FuncParamAttr &&
+ DecorationID != SPIRV::Decoration::UserSemantic &&
DecorationID != SPIRV::Decoration::CacheControlLoadINTEL &&
DecorationID != SPIRV::Decoration::CacheControlStoreINTEL)
continue;
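To make the dedup rule concrete, a sketch of how the signature drops decoration flags for non-repeatable decorations (isRepeatableDecoration and hashOperand are hypothetical helpers, not existing APIs; the operand layout is as the surrounding code assumes, with operand 1 holding the decoration id):

    // Two OpDecorate instructions that differ only in their flag operands
    // then produce identical signatures, so the second is seen as a duplicate.
    for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
      if (MI.getOpcode() == SPIRV::OpDecorate && i >= 2 &&
          !isRepeatableDecoration(MI.getOperand(1).getImm()))
        continue; // skip decoration flags for at-most-once decorations
      Signature.push_back(hashOperand(MI.getOperand(i)));
    }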
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
index f66eb9d..fcd6cd7 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -38,8 +38,8 @@ static cl::opt<unsigned>
void SparcInstrInfo::anchor() {}
SparcInstrInfo::SparcInstrInfo(const SparcSubtarget &ST)
- : SparcGenInstrInfo(ST, SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP), RI(ST),
- Subtarget(ST) {}
+ : SparcGenInstrInfo(ST, RI, SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP),
+ RI(ST), Subtarget(ST) {}
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
diff --git a/llvm/lib/Target/Sparc/SparcSubtarget.h b/llvm/lib/Target/Sparc/SparcSubtarget.h
index b1decca..f575f6d 100644
--- a/llvm/lib/Target/Sparc/SparcSubtarget.h
+++ b/llvm/lib/Target/Sparc/SparcSubtarget.h
@@ -21,7 +21,6 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/Triple.h"
-#include <string>
#define GET_SUBTARGETINFO_HEADER
#include "SparcGenSubtargetInfo.inc"
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 2e21f27..23e7e7e 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -60,7 +60,7 @@ static uint64_t allOnes(unsigned int Count) {
void SystemZInstrInfo::anchor() {}
SystemZInstrInfo::SystemZInstrInfo(const SystemZSubtarget &sti)
- : SystemZGenInstrInfo(sti, -1, -1),
+ : SystemZGenInstrInfo(sti, RI, -1, -1),
RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister(),
sti.getHwMode()),
STI(sti) {}
diff --git a/llvm/lib/Target/VE/VEInstrInfo.cpp b/llvm/lib/Target/VE/VEInstrInfo.cpp
index d5e804a..bae703b 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.cpp
+++ b/llvm/lib/Target/VE/VEInstrInfo.cpp
@@ -35,7 +35,7 @@ using namespace llvm;
void VEInstrInfo::anchor() {}
VEInstrInfo::VEInstrInfo(const VESubtarget &ST)
- : VEGenInstrInfo(ST, VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}
+ : VEGenInstrInfo(ST, RI, VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}
static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
index 343d90e..8b4e4fb 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
@@ -34,7 +34,7 @@ using namespace llvm;
#include "WebAssemblyGenInstrInfo.inc"
WebAssemblyInstrInfo::WebAssemblyInstrInfo(const WebAssemblySubtarget &STI)
- : WebAssemblyGenInstrInfo(STI, WebAssembly::ADJCALLSTACKDOWN,
+ : WebAssemblyGenInstrInfo(STI, RI, WebAssembly::ADJCALLSTACKDOWN,
WebAssembly::ADJCALLSTACKUP,
WebAssembly::CATCHRET),
RI(STI.getTargetTriple()) {}
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
index 92a9812..70f7b88 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp
@@ -119,18 +119,82 @@ InstructionCost WebAssemblyTTIImpl::getCastInstrCost(
}
}
- // extend_low
static constexpr TypeConversionCostTblEntry ConversionTbl[] = {
+ // extend_low
{ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1},
{ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1},
{ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1},
{ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1},
{ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1},
{ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1},
+ // 2 x extend_low
{ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2},
{ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2},
{ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2},
{ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2},
+ // extend_low, extend_high
+ {ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2},
+ {ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2},
+ {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2},
+ {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2},
+ {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2},
+ {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2},
+ // 2x extend_low, extend_high
+ {ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 4},
+ {ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 4},
+ {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4},
+ {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4},
+ // shuffle
+ {ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 2},
+ {ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 4},
+ {ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 2},
+ {ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 4},
+ // narrow, and
+ {ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2},
+ {ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2},
+ // narrow, 2x and
+ {ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3},
+ // 3x narrow, 4x and
+ {ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 7},
+ {ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7},
+ // 7x narrow, 8x and
+ {ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 15},
+ // convert_i32x4
+ {ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1},
+ {ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1},
+ {ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1},
+ {ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1},
+ // extend_low, convert
+ {ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 2},
+ {ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 2},
+ {ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2},
+ {ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2},
+ // extend_low x 2, convert
+ {ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8, 3},
+ {ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8, 3},
+ {ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3},
+ {ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 3},
+ // several shuffles
+ {ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 10},
+ {ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 10},
+ {ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 10},
+      {ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 10},
+      // trunc_sat, const, and, 3x narrow
+ {ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 6},
+ {ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 6},
+ {ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 6},
+ {ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 6},
+      // trunc_sat, const, and, narrow
+ {ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 4},
+ {ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 4},
+ {ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 4},
+ {ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 4},
+ // 2x trunc_sat, const, 2x and, 3x narrow
+ {ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 8},
+ {ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 8},
+ // 2x trunc_sat, const, 2x and, narrow
+ {ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 6},
+ {ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 6},
};
if (const auto *Entry =
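The table feeds the lookup that the excerpt truncates: ConvertCostTableLookup (from llvm/CodeGen/CostTable.h) returns the first entry matching the (ISD opcode, destination MVT, source MVT) triple, which is one more reason each triple should appear exactly once above. A sketch of the idiomatic continuation, with the DstVT/SrcVT names assumed rather than taken from this file:

    // DstVT/SrcVT are assumed to be the legalized simple value types in scope.
    if (const auto *Entry =
            ConvertCostTableLookup(ConversionTbl, ISD, DstVT, SrcVT))
      return Entry->Cost;               // table hit: fixed instruction count
    // Otherwise fall through to the generic cost computation.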
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
index 2573066..4146c0e 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h
@@ -21,7 +21,6 @@
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
-#include <algorithm>
namespace llvm {
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
index ab6e6d0..b3bf37a 100644
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -50,7 +50,6 @@
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 05a854a..5bce539 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -635,6 +635,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::FROUNDEVEN, VT, Action);
setOperationAction(ISD::FTRUNC, VT, Action);
setOperationAction(ISD::FLDEXP, VT, Action);
+ setOperationAction(ISD::FSINCOSPI, VT, Action);
};
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 6b2a7a4..2c6d1af 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -85,7 +85,7 @@ static cl::opt<unsigned> UndefRegClearance(
void X86InstrInfo::anchor() {}
X86InstrInfo::X86InstrInfo(const X86Subtarget &STI)
- : X86GenInstrInfo(STI,
+ : X86GenInstrInfo(STI, RI,
(STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
: X86::ADJCALLSTACKDOWN32),
(STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index 868f413..4f5aadc 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -419,6 +419,8 @@ public:
/// Enable the MachineScheduler pass for all X86 subtargets.
bool enableMachineScheduler() const override { return true; }
+ bool enableTerminalRule() const override { return true; }
+
bool enableEarlyIfConversion() const override;
void getPostRAMutations(std::vector<std::unique_ptr<ScheduleDAGMutation>>
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 0c2bd7c..d4ad98a 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -50,7 +50,6 @@
#include "llvm/Transforms/CFGuard.h"
#include <memory>
#include <optional>
-#include <string>
using namespace llvm;
diff --git a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
index 1a9133a..80fda34 100644
--- a/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
+++ b/llvm/lib/Target/XCore/XCoreInstrInfo.cpp
@@ -43,7 +43,7 @@ namespace XCore {
void XCoreInstrInfo::anchor() {}
XCoreInstrInfo::XCoreInstrInfo(const XCoreSubtarget &ST)
- : XCoreGenInstrInfo(ST, XCore::ADJCALLSTACKDOWN, XCore::ADJCALLSTACKUP),
+ : XCoreGenInstrInfo(ST, RI, XCore::ADJCALLSTACKDOWN, XCore::ADJCALLSTACKUP),
RI() {}
static bool isZeroImm(const MachineOperand &op) {
diff --git a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
index be69cef..6bbebde 100644
--- a/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
+++ b/llvm/lib/Target/Xtensa/XtensaInstrInfo.cpp
@@ -48,7 +48,8 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI) {
}
XtensaInstrInfo::XtensaInstrInfo(const XtensaSubtarget &STI)
- : XtensaGenInstrInfo(STI, Xtensa::ADJCALLSTACKDOWN, Xtensa::ADJCALLSTACKUP),
+ : XtensaGenInstrInfo(STI, RI, Xtensa::ADJCALLSTACKDOWN,
+ Xtensa::ADJCALLSTACKUP),
RI(STI), STI(STI) {}
Register XtensaInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
diff --git a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
index b5548d4..8c8d16a6 100644
--- a/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp
@@ -1944,6 +1944,10 @@ void InstrLowerer::emitNameData() {
NamesVar = new GlobalVariable(M, NamesVal->getType(), true,
GlobalValue::PrivateLinkage, NamesVal,
getInstrProfNamesVarName());
+ if (isGPUProfTarget(M)) {
+ NamesVar->setLinkage(GlobalValue::ExternalLinkage);
+ NamesVar->setVisibility(GlobalValue::ProtectedVisibility);
+ }
NamesSize = CompressedNameStr.size();
setGlobalVariableLargeSection(TT, *NamesVar);
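A hedged note on the added hunk: for GPU profiling targets the compressed names blob presumably has to be located from the host side of the offloading runtime, which private linkage would prevent; external linkage plus protected visibility exports the symbol while keeping the definition non-preemptible. The adjustment in isolation:

    // NamesVar was just created with PrivateLinkage above; on GPU targets it
    // is re-exported. Protected visibility keeps references bound locally.
    if (isGPUProfTarget(M)) {
      NamesVar->setLinkage(GlobalValue::ExternalLinkage);
      NamesVar->setVisibility(GlobalValue::ProtectedVisibility);
    }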
diff --git a/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp b/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp
index a577f51..4a7144f 100644
--- a/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp
+++ b/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp
@@ -78,11 +78,16 @@ DropUnnecessaryAssumesPass::run(Function &F, FunctionAnalysisManager &FAM) {
SmallVector<OperandBundleDef> KeptBundles;
unsigned NumBundles = Assume->getNumOperandBundles();
for (unsigned I = 0; I != NumBundles; ++I) {
- auto IsDead = [](OperandBundleUse Bundle) {
+ auto IsDead = [&](OperandBundleUse Bundle) {
// "ignore" operand bundles are always dead.
if (Bundle.getTagName() == "ignore")
return true;
+ // "dereferenceable" operand bundles are only dropped if requested
+ // (e.g., after loop vectorization has run).
+ if (Bundle.getTagName() == "dereferenceable")
+ return DropDereferenceable;
+
// Bundles without arguments do not affect any specific values.
// Always keep them for now.
if (Bundle.Inputs.empty())
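Illustrative summary of the predicate (a sketch, not the full pass logic; DropDereferenceable is the pass option referenced in the hunk): with the option set, a bundle such as "dereferenceable"(ptr %p, i64 8) on an llvm.assume now counts as dead; with it clear, the bundle is kept.

    auto IsDead = [&](OperandBundleUse Bundle) {
      if (Bundle.getTagName() == "ignore")
        return true;                // always droppable
      if (Bundle.getTagName() == "dereferenceable")
        return DropDereferenceable; // droppable only on request, e.g. after
                                    // loop vectorization has run
      if (Bundle.Inputs.empty())
        return false;               // keep argument-less bundles for now
      // ... the real pass continues with checks on the affected values.
      return false;
    };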
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 45b5570..566d6ea 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1232,6 +1232,30 @@ public:
/// Superset of instructions that return true for isScalarWithPredication.
bool isPredicatedInst(Instruction *I) const;
+  /// A helper function that returns how much we should divide the cost of a
+  /// predicated block by. Typically this is the reciprocal of the block
+  /// probability, i.e. if we return X we are assuming the predicated block
+  /// will execute once for every X iterations of the loop header, so the
+  /// block should only contribute 1/X of its cost to the total cost
+  /// calculation. When optimizing for code size the divisor is just 1, as
+  /// code size costs don't depend on execution probabilities.
+ ///
+ /// TODO: We should use actual block probability here, if available.
+ /// Currently, we always assume predicated blocks have a 50% chance of
+ /// executing, apart from blocks that are only predicated due to tail folding.
+ inline unsigned
+ getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind,
+ BasicBlock *BB) const {
+ // If a block wasn't originally predicated but was predicated due to
+    // e.g. tail folding, don't divide the cost. Tail-folded loops may still
+    // be predicated in the final vector loop iteration, but for most loops
+    // that don't have low trip counts the probability of executing the
+    // predicated work in any given iteration is close to zero.
+ if (!Legal->blockNeedsPredication(BB))
+ return 1;
+ return CostKind == TTI::TCK_CodeSize ? 1 : 2;
+ }
+
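A quick worked example of the divisor, with numbers chosen for illustration: a block of scalar cost 8 that Legal marks as needing predication contributes 8 / 2 = 4 under throughput cost kinds, 8 / 1 = 8 under TCK_CodeSize, and 8 / 1 = 8 when it is only predicated because of tail folding. As a standalone sketch:

    // Mirrors the decision above for a hypothetical block; not an LLVM API.
    unsigned divisorFor(bool NeedsPredication, bool IsCodeSizeCost) {
      if (!NeedsPredication)          // predicated purely by tail folding
        return 1;
      return IsCodeSizeCost ? 1 : 2;  // assume 50% execution probability
    }
    // Contribution = BlockCost / divisorFor(...), e.g. 8 / 2 == 4.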
/// Return the costs for our two available strategies for lowering a
/// div/rem operation which requires speculating at least one lane.
/// First result is for scalarization (will be invalid for scalable
@@ -2887,7 +2911,8 @@ LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
// Scale the cost by the probability of executing the predicated blocks.
// This assumes the predicated block for each vector lane is equally
// likely.
- ScalarizationCost = ScalarizationCost / getPredBlockCostDivisor(CostKind);
+ ScalarizationCost =
+ ScalarizationCost / getPredBlockCostDivisor(CostKind, I->getParent());
}
InstructionCost SafeDivisorCost = 0;
@@ -5032,7 +5057,7 @@ InstructionCost LoopVectorizationCostModel::computePredInstDiscount(
}
// Scale the total scalar cost by block probability.
- ScalarCost /= getPredBlockCostDivisor(CostKind);
+ ScalarCost /= getPredBlockCostDivisor(CostKind, I->getParent());
// Compute the discount. A non-negative discount means the vector version
// of the instruction costs more, and scalarizing would be beneficial.
@@ -5082,10 +5107,11 @@ InstructionCost LoopVectorizationCostModel::expectedCost(ElementCount VF) {
// stores and instructions that may divide by zero) will now be
// unconditionally executed. For the scalar case, we may not always execute
// the predicated block, if it is an if-else block. Thus, scale the block's
- // cost by the probability of executing it. blockNeedsPredication from
- // Legal is used so as to not include all blocks in tail folded loops.
- if (VF.isScalar() && Legal->blockNeedsPredication(BB))
- BlockCost /= getPredBlockCostDivisor(CostKind);
+ // cost by the probability of executing it.
+ // getPredBlockCostDivisor will return 1 for blocks that are only predicated
+ // by the header mask when folding the tail.
+ if (VF.isScalar())
+ BlockCost /= getPredBlockCostDivisor(CostKind, BB);
Cost += BlockCost;
}
@@ -5164,7 +5190,7 @@ LoopVectorizationCostModel::getMemInstScalarizationCost(Instruction *I,
// conditional branches, but may not be executed for each vector lane. Scale
// the cost by the probability of executing the predicated block.
if (isPredicatedInst(I)) {
- Cost /= getPredBlockCostDivisor(CostKind);
+ Cost /= getPredBlockCostDivisor(CostKind, I->getParent());
// Add the cost of an i1 extract and a branch
auto *VecI1Ty =
@@ -6732,6 +6758,10 @@ bool VPCostContext::skipCostComputation(Instruction *UI, bool IsVector) const {
SkipCostComputation.contains(UI);
}
+unsigned VPCostContext::getPredBlockCostDivisor(BasicBlock *BB) const {
+ return CM.getPredBlockCostDivisor(CostKind, BB);
+}
+
InstructionCost
LoopVectorizationPlanner::precomputeCosts(VPlan &Plan, ElementCount VF,
VPCostContext &CostCtx) const {
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp
index 5e4303a..90696ff 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp
@@ -99,20 +99,20 @@ VPValue::VPValue(const unsigned char SC, Value *UV, VPDef *Def)
VPValue::~VPValue() {
assert(Users.empty() && "trying to delete a VPValue with remaining users");
- if (Def)
+ if (VPDef *Def = getDefiningRecipe())
Def->removeDefinedValue(this);
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void VPValue::print(raw_ostream &OS, VPSlotTracker &SlotTracker) const {
- if (const VPRecipeBase *R = dyn_cast_or_null<VPRecipeBase>(Def))
+ if (const VPRecipeBase *R = getDefiningRecipe())
R->print(OS, "", SlotTracker);
else
printAsOperand(OS, SlotTracker);
}
void VPValue::dump() const {
- const VPRecipeBase *Instr = dyn_cast_or_null<VPRecipeBase>(this->Def);
+ const VPRecipeBase *Instr = getDefiningRecipe();
VPSlotTracker SlotTracker(
(Instr && Instr->getParent()) ? Instr->getParent()->getPlan() : nullptr);
print(dbgs(), SlotTracker);
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 5851b3a..72858e1 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -959,6 +959,11 @@ public:
/// Add metadata with kind \p Kind and \p Node.
void addMetadata(unsigned Kind, MDNode *Node) {
+ assert(none_of(Metadata,
+ [Kind](const std::pair<unsigned, MDNode *> &P) {
+ return P.first == Kind;
+ }) &&
+ "Kind must appear at most once in Metadata");
Metadata.emplace_back(Kind, Node);
}
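The new assertion pins down an invariant that was previously implicit: a given metadata kind may be attached to a recipe at most once. A hedged usage sketch (R and the nodes are hypothetical):

    R->addMetadata(LLVMContext::MD_nontemporal, NontemporalNode); // OK
    R->addMetadata(LLVMContext::MD_range, RangeNode);   // OK, different kind
    // R->addMetadata(LLVMContext::MD_range, OtherNode); // would now assert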
diff --git a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
index 965426f..caabfa7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanHelpers.h
@@ -50,21 +50,6 @@ Value *getRuntimeVF(IRBuilderBase &B, Type *Ty, ElementCount VF);
Value *createStepForVF(IRBuilderBase &B, Type *Ty, ElementCount VF,
int64_t Step);
-/// A helper function that returns how much we should divide the cost of a
-/// predicated block by. Typically this is the reciprocal of the block
-/// probability, i.e. if we return X we are assuming the predicated block will
-/// execute once for every X iterations of the loop header so the block should
-/// only contribute 1/X of its cost to the total cost calculation, but when
-/// optimizing for code size it will just be 1 as code size costs don't depend
-/// on execution probabilities.
-///
-/// TODO: We should use actual block probability here, if available. Currently,
-/// we always assume predicated blocks have a 50% chance of executing.
-inline unsigned
-getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind) {
- return CostKind == TTI::TCK_CodeSize ? 1 : 2;
-}
-
/// A range of powers-of-2 vectorization factors with fixed start and
/// adjustable end. The range includes start and excludes end, e.g.,:
/// [1, 16) = {1, 2, 4, 8}
@@ -367,6 +352,10 @@ struct VPCostContext {
/// has already been pre-computed.
bool skipCostComputation(Instruction *UI, bool IsVector) const;
+ /// \returns how much the cost of a predicated block should be divided by.
+ /// Forwards to LoopVectorizationCostModel::getPredBlockCostDivisor.
+ unsigned getPredBlockCostDivisor(BasicBlock *BB) const;
+
/// Returns the OperandInfo for \p V, if it is a live-in.
TargetTransformInfo::OperandValueInfo getOperandInfo(VPValue *V) const;
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 80cd112..707886f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -3349,7 +3349,7 @@ InstructionCost VPReplicateRecipe::computeCost(ElementCount VF,
// Scale the cost by the probability of executing the predicated blocks.
// This assumes the predicated block for each vector lane is equally
// likely.
- ScalarCost /= getPredBlockCostDivisor(Ctx.CostKind);
+ ScalarCost /= Ctx.getPredBlockCostDivisor(UI->getParent());
return ScalarCost;
}
case Instruction::Load:
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 634df51..eab6426 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1420,10 +1420,26 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
// broadcasts.
if (!vputils::isSingleScalar(RepOrWidenR) ||
!all_of(RepOrWidenR->users(), [RepOrWidenR](const VPUser *U) {
- return U->usesScalars(RepOrWidenR) ||
- match(cast<VPRecipeBase>(U),
- m_CombineOr(m_ExtractLastElement(m_VPValue()),
- m_ExtractLastLanePerPart(m_VPValue())));
+ if (auto *Store = dyn_cast<VPWidenStoreRecipe>(U)) {
+ // VPWidenStore doesn't have users, and stores are always
+ // profitable to widen: hence, permitting single-scalar stored
+ // values is an important leaf condition. The assert must hold as
+ // we checked the RepOrWidenR operand against
+ // vputils::isSingleScalar.
+ assert(RepOrWidenR == Store->getAddr() ||
+ vputils::isSingleScalar(Store->getStoredValue()));
+ return true;
+ }
+
+ if (auto *VPI = dyn_cast<VPInstruction>(U)) {
+ unsigned Opcode = VPI->getOpcode();
+ if (Opcode == VPInstruction::ExtractLastElement ||
+ Opcode == VPInstruction::ExtractLastLanePerPart ||
+ Opcode == VPInstruction::ExtractPenultimateElement)
+ return true;
+ }
+
+ return U->usesScalars(RepOrWidenR);
}))
continue;
@@ -1745,17 +1761,17 @@ static bool simplifyBranchConditionForVFAndUF(VPlan &Plan, ElementCount BestVF,
if (match(Term, m_BranchOnCount()) ||
match(Term, m_BranchOnCond(m_Not(m_ActiveLaneMask(
m_VPValue(), m_VPValue(), m_VPValue()))))) {
- // Try to simplify the branch condition if TC <= VF * UF when the latch
- // terminator is BranchOnCount or BranchOnCond where the input is
- // Not(ActiveLaneMask).
- const SCEV *TripCount =
- vputils::getSCEVExprForVPValue(Plan.getTripCount(), SE);
- assert(!isa<SCEVCouldNotCompute>(TripCount) &&
+ // Try to simplify the branch condition if VectorTC <= VF * UF when the
+ // latch terminator is BranchOnCount or BranchOnCond(Not(ActiveLaneMask)).
+ const SCEV *VectorTripCount =
+ vputils::getSCEVExprForVPValue(&Plan.getVectorTripCount(), SE);
+ if (isa<SCEVCouldNotCompute>(VectorTripCount))
+ VectorTripCount = vputils::getSCEVExprForVPValue(Plan.getTripCount(), SE);
+ assert(!isa<SCEVCouldNotCompute>(VectorTripCount) &&
"Trip count SCEV must be computable");
ElementCount NumElements = BestVF.multiplyCoefficientBy(BestUF);
- const SCEV *C = SE.getElementCount(TripCount->getType(), NumElements);
- if (TripCount->isZero() ||
- !SE.isKnownPredicate(CmpInst::ICMP_ULE, TripCount, C))
+ const SCEV *C = SE.getElementCount(VectorTripCount->getType(), NumElements);
+ if (!SE.isKnownPredicate(CmpInst::ICMP_ULE, VectorTripCount, C))
return false;
} else if (match(Term, m_BranchOnCond(m_VPValue(Cond)))) {
// For BranchOnCond, check if we can prove the condition to be true using VF
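A worked instance of the new check, with invented numbers: for BestVF = 4 (fixed) and BestUF = 2, NumElements = 8; if the vector trip count is provably <= 8, the vector loop body executes at most once and the latch branch can be simplified away. Falling back from the vector trip count to the scalar trip count when the former is not computable stays sound, because the vector trip count never exceeds the scalar one.

    // Illustration of the bound, not the SCEV machinery itself:
    //   VectorTC = 8  : 8 <= 4 * 2 holds -> latch branch simplified.
    //   VectorTC = 16 : 16 <= 8 fails    -> latch branch kept.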
@@ -4131,13 +4147,13 @@ VPlanTransforms::expandSCEVs(VPlan &Plan, ScalarEvolution &SE) {
/// is defined at \p Idx of a load interleave group.
static bool canNarrowLoad(VPWidenRecipe *WideMember0, unsigned OpIdx,
VPValue *OpV, unsigned Idx) {
- auto *DefR = OpV->getDefiningRecipe();
- if (!DefR)
- return WideMember0->getOperand(OpIdx) == OpV;
- if (auto *W = dyn_cast<VPWidenLoadRecipe>(DefR))
- return !W->getMask() && WideMember0->getOperand(OpIdx) == OpV;
-
- if (auto *IR = dyn_cast<VPInterleaveRecipe>(DefR))
+ VPValue *Member0Op = WideMember0->getOperand(OpIdx);
+ VPRecipeBase *Member0OpR = Member0Op->getDefiningRecipe();
+ if (!Member0OpR)
+ return Member0Op == OpV;
+ if (auto *W = dyn_cast<VPWidenLoadRecipe>(Member0OpR))
+ return !W->getMask() && Member0Op == OpV;
+ if (auto *IR = dyn_cast<VPInterleaveRecipe>(Member0OpR))
return IR->getInterleaveGroup()->isFull() && IR->getVPValue(Idx) == OpV;
return false;
}
diff --git a/llvm/test/Analysis/CostModel/RISCV/cast.ll b/llvm/test/Analysis/CostModel/RISCV/cast.ll
index e64bce2..6dacd59 100644
--- a/llvm/test/Analysis/CostModel/RISCV/cast.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/cast.ll
@@ -6239,3 +6239,13 @@ define void @legalization_crash() {
fptoui <192 x float> undef to <192 x i1>
ret void
}
+
+; Test that types that need to be split go through BasicTTIImpl.
+define void @BitInt_crash() {
+; ZVE64X-LABEL: 'BitInt_crash'
+; ZVE64X-NEXT: Cost Model: Found an estimated cost of 2043 for instruction: %1 = bitcast <16 x i64> poison to <512 x i2>
+; ZVE64X-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ bitcast <16 x i64> poison to <512 x i2>
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll b/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll
index e086ab9..33ea749 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-combine-add-sub-mul.ll
@@ -52,12 +52,11 @@ define <2 x i64> @test_mul_sub_2x64_2(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c,
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT: // kill: def $q3 killed $q3 def $z3
; CHECK-NEXT: // kill: def $q2 killed $q2 def $z2
+; CHECK-NEXT: // kill: def $q3 killed $q3 def $z3
; CHECK-NEXT: sdiv z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mul z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: sub v0.2d, v1.2d, v0.2d
+; CHECK-NEXT: mul z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: sub v0.2d, v2.2d, v0.2d
; CHECK-NEXT: ret
%div = sdiv <2 x i64> %a, %b
%mul = mul <2 x i64> %c, %d
diff --git a/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
new file mode 100644
index 0000000..8c31e7c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/arm64-regress-opt-cmp-signed.mir
@@ -0,0 +1,55 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=aarch64-linux-gnu -run-pass peephole-opt -o - %s | FileCheck %s
+--- |
+ define i32 @test01() nounwind {
+ entry:
+ %0 = select i1 true, i32 1, i32 0
+ %1 = and i32 %0, 65535
+ %2 = icmp sgt i32 %1, 0
+ br i1 %2, label %if.then, label %if.end
+
+ if.then: ; preds = %entry
+ ret i32 1
+
+ if.end: ; preds = %entry
+ ret i32 0
+ }
+...
+---
+name: test01
+registers:
+ - { id: 0, class: gpr32 }
+ - { id: 1, class: gpr32common }
+body: |
+ ; CHECK-LABEL: name: test01
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK-NEXT: [[ANDSWri:%[0-9]+]]:gpr32common = ANDSWri killed [[ANDSWri]], 15, implicit-def $nzcv
+ ; CHECK-NEXT: Bcc 12, %bb.2, implicit $nzcv
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.if.then:
+ ; CHECK-NEXT: $w0 = MOVi32imm 1
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.if.end:
+ ; CHECK-NEXT: $w0 = MOVi32imm 0
+ ; CHECK-NEXT: RET_ReallyLR implicit $w0
+ bb.0.entry:
+ successors: %bb.2.if.end, %bb.1.if.then
+
+ %0 = MOVi32imm 1
+ %1 = ANDWri killed %1, 15
+ $wzr = SUBSWri killed %1, 0, 0, implicit-def $nzcv
+ Bcc 12, %bb.2.if.end, implicit $nzcv
+
+ bb.1.if.then:
+ $w0 = MOVi32imm 1
+ RET_ReallyLR implicit $w0
+
+ bb.2.if.end:
+ $w0 = MOVi32imm 0
+ RET_ReallyLR implicit $w0
+
+...
diff --git a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
index dbbfbea..f725c19 100644
--- a/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
+++ b/llvm/test/CodeGen/AArch64/build-vector-two-dup.ll
@@ -188,11 +188,11 @@ entry:
define <8 x i8> @test11(ptr nocapture noundef readonly %a, ptr nocapture noundef readonly %b) {
; CHECK-LABEL: test11:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ld1r { v1.8b }, [x0]
-; CHECK-NEXT: ld1r { v2.8b }, [x1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.h[2], v2.h[0]
-; CHECK-NEXT: mov v0.h[3], v1.h[0]
+; CHECK-NEXT: ld1r { v0.8b }, [x0]
+; CHECK-NEXT: ld1r { v1.8b }, [x1]
+; CHECK-NEXT: fmov d2, d0
+; CHECK-NEXT: mov v0.h[2], v1.h[0]
+; CHECK-NEXT: mov v0.h[3], v2.h[0]
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
index 533e831..258eaab 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-contract.ll
@@ -14,13 +14,12 @@ define <vscale x 4 x double> @mull_add(<vscale x 4 x double> %a, <vscale x 4 x d
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: fmul z7.d, z0.d, z1.d
; CHECK-NEXT: fmul z1.d, z6.d, z1.d
-; CHECK-NEXT: movprfx z3, z7
-; CHECK-NEXT: fmla z3.d, p0/m, z6.d, z2.d
+; CHECK-NEXT: fmad z6.d, p0/m, z2.d, z7.d
; CHECK-NEXT: fnmsb z0.d, p0/m, z2.d, z1.d
; CHECK-NEXT: uzp2 z1.d, z4.d, z5.d
; CHECK-NEXT: uzp1 z2.d, z4.d, z5.d
; CHECK-NEXT: fadd z2.d, z2.d, z0.d
-; CHECK-NEXT: fadd z1.d, z3.d, z1.d
+; CHECK-NEXT: fadd z1.d, z6.d, z1.d
; CHECK-NEXT: zip1 z0.d, z2.d, z1.d
; CHECK-NEXT: zip2 z1.d, z2.d, z1.d
; CHECK-NEXT: ret
@@ -225,17 +224,14 @@ define <vscale x 4 x double> @mul_add_rot_mull(<vscale x 4 x double> %a, <vscale
; CHECK-NEXT: fmul z1.d, z25.d, z1.d
; CHECK-NEXT: fmul z3.d, z4.d, z24.d
; CHECK-NEXT: fmul z24.d, z5.d, z24.d
-; CHECK-NEXT: movprfx z7, z26
-; CHECK-NEXT: fmla z7.d, p0/m, z25.d, z2.d
+; CHECK-NEXT: fmad z25.d, p0/m, z2.d, z26.d
; CHECK-NEXT: fnmsb z0.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: movprfx z1, z3
-; CHECK-NEXT: fmla z1.d, p0/m, z6.d, z5.d
-; CHECK-NEXT: movprfx z2, z24
-; CHECK-NEXT: fnmls z2.d, p0/m, z4.d, z6.d
-; CHECK-NEXT: fadd z2.d, z0.d, z2.d
-; CHECK-NEXT: fadd z1.d, z7.d, z1.d
-; CHECK-NEXT: zip1 z0.d, z2.d, z1.d
-; CHECK-NEXT: zip2 z1.d, z2.d, z1.d
+; CHECK-NEXT: fmla z3.d, p0/m, z6.d, z5.d
+; CHECK-NEXT: fnmsb z4.d, p0/m, z6.d, z24.d
+; CHECK-NEXT: fadd z1.d, z0.d, z4.d
+; CHECK-NEXT: fadd z2.d, z25.d, z3.d
+; CHECK-NEXT: zip1 z0.d, z1.d, z2.d
+; CHECK-NEXT: zip2 z1.d, z1.d, z2.d
; CHECK-NEXT: ret
entry:
%strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
index 1eed972..b68c009 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-add-mull-scalable-fast.ll
@@ -200,12 +200,10 @@ define <vscale x 4 x double> @mul_add_rot_mull(<vscale x 4 x double> %a, <vscale
; CHECK-NEXT: fmul z3.d, z2.d, z25.d
; CHECK-NEXT: fmul z25.d, z24.d, z25.d
; CHECK-NEXT: fmla z3.d, p0/m, z24.d, z0.d
-; CHECK-NEXT: movprfx z24, z25
-; CHECK-NEXT: fmla z24.d, p0/m, z26.d, z1.d
-; CHECK-NEXT: movprfx z6, z24
-; CHECK-NEXT: fmla z6.d, p0/m, z5.d, z4.d
+; CHECK-NEXT: fmla z25.d, p0/m, z26.d, z1.d
+; CHECK-NEXT: fmla z25.d, p0/m, z5.d, z4.d
; CHECK-NEXT: fmla z3.d, p0/m, z26.d, z4.d
-; CHECK-NEXT: fnmsb z2.d, p0/m, z0.d, z6.d
+; CHECK-NEXT: fnmsb z2.d, p0/m, z0.d, z25.d
; CHECK-NEXT: fmsb z1.d, p0/m, z5.d, z3.d
; CHECK-NEXT: zip1 z0.d, z2.d, z1.d
; CHECK-NEXT: zip2 z1.d, z2.d, z1.d
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
index c2fc959..583391c 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-f16-add-scalable.ll
@@ -17,11 +17,10 @@ define <vscale x 4 x half> @complex_add_v4f16(<vscale x 4 x half> %a, <vscale x
; CHECK-NEXT: uunpklo z3.d, z3.s
; CHECK-NEXT: uunpklo z1.d, z1.s
; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z3
-; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z2.h
-; CHECK-NEXT: zip2 z2.d, z0.d, z1.d
-; CHECK-NEXT: zip1 z0.d, z0.d, z1.d
-; CHECK-NEXT: uzp1 z0.s, z0.s, z2.s
+; CHECK-NEXT: fadd z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: zip2 z1.d, z0.d, z2.d
+; CHECK-NEXT: zip1 z0.d, z0.d, z2.d
+; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: ret
entry:
%a.deinterleaved = tail call { <vscale x 2 x half>, <vscale x 2 x half> } @llvm.vector.deinterleave2.nxv4f16(<vscale x 4 x half> %a)
diff --git a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
index 061fd07..00b0095 100644
--- a/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
+++ b/llvm/test/CodeGen/AArch64/complex-deinterleaving-i16-mul-scalable.ll
@@ -18,11 +18,10 @@ define <vscale x 4 x i16> @complex_mul_v4i16(<vscale x 4 x i16> %a, <vscale x 4
; CHECK-NEXT: uzp2 z1.d, z1.d, z3.d
; CHECK-NEXT: mul z5.d, z2.d, z0.d
; CHECK-NEXT: mul z2.d, z2.d, z4.d
-; CHECK-NEXT: movprfx z3, z5
-; CHECK-NEXT: mla z3.d, p0/m, z1.d, z4.d
+; CHECK-NEXT: mad z4.d, p0/m, z1.d, z5.d
; CHECK-NEXT: msb z0.d, p0/m, z1.d, z2.d
-; CHECK-NEXT: zip2 z1.d, z0.d, z3.d
-; CHECK-NEXT: zip1 z0.d, z0.d, z3.d
+; CHECK-NEXT: zip2 z1.d, z0.d, z4.d
+; CHECK-NEXT: zip1 z0.d, z0.d, z4.d
; CHECK-NEXT: uzp1 z0.s, z0.s, z1.s
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll b/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
index 33c5ba7..8297fa2 100644
--- a/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
+++ b/llvm/test/CodeGen/AArch64/icmp-ult-eq-fold.ll
@@ -161,6 +161,338 @@ define i1 @lt64_u16_and_23(i64 %0) {
ret i1 %3
}
+define i1 @test_disjoint(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: tst w9, w8
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp eq i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: tst w9, w8
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp sgt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint3:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: tst w9, w8
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp slt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint4(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: and w8, w9, w8
+; CHECK-NEXT: cmp w8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %5 = and i32 %3, %4
+ %6 = icmp sle i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_4(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint_inverse_4:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bic w8, w9, w8
+; CHECK-NEXT: cmp w8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp sle i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bics wzr, w9, w8
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp eq i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint2_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bics wzr, w9, w8
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp sgt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_inverse(i1 %0, i32 %1, i32 %2) {
+; CHECK-LABEL: test_disjoint3_inverse:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr w9, w2, #0x800000
+; CHECK-NEXT: lsl w8, w8, w1
+; CHECK-NEXT: bics wzr, w9, w8
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i32 %2, 8388608
+ %4 = shl nuw i32 1, %1
+ %not = xor i32 %4, -1
+ %5 = and i32 %3, %not
+ %6 = icmp slt i32 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: tst x9, x8
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp eq i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint2_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: tst x9, x8
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp sgt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint3_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: tst x9, x8
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp slt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint4_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint4_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: and x8, x9, x8
+; CHECK-NEXT: cmp x8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %5 = and i64 %3, %4
+ %6 = icmp sle i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_4_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_inverse_4_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bic x8, x9, x8
+; CHECK-NEXT: cmp x8, #1
+; CHECK-NEXT: cset w8, lt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp sle i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bics xzr, x9, x8
+; CHECK-NEXT: cset w8, eq
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp eq i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint2_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint2_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bics xzr, x9, x8
+; CHECK-NEXT: cset w8, gt
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp sgt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
+define i1 @test_disjoint3_inverse_64(i1 %0, i64 %1, i64 %2) {
+; CHECK-LABEL: test_disjoint3_inverse_64:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: orr x9, x2, #0x80000000000000
+; CHECK-NEXT: lsl x8, x8, x1
+; CHECK-NEXT: bics xzr, x9, x8
+; CHECK-NEXT: cset w8, mi
+; CHECK-NEXT: orr w8, w0, w8
+; CHECK-NEXT: and w0, w8, #0x1
+; CHECK-NEXT: ret
+entry:
+ %3 = or disjoint i64 %2, 36028797018963968
+ %4 = shl nuw i64 1, %1
+ %not = xor i64 %4, -1
+ %5 = and i64 %3, %not
+ %6 = icmp slt i64 %5, 0
+ %7 = select i1 %0, i1 true, i1 %6
+ ret i1 %7
+}
+
; negative test
define i1 @lt3_u8(i8 %0) {
; CHECK-LABEL: lt3_u8:
diff --git a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
index 47fae5a..f0abbaa 100644
--- a/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/llvm-ir-to-intrinsic.ll
@@ -1148,11 +1148,10 @@ define <vscale x 4 x i64> @fshl_rot_illegal_i64(<vscale x 4 x i64> %a, <vscale x
; CHECK-NEXT: and z3.d, z3.d, #0x3f
; CHECK-NEXT: lslr z4.d, p0/m, z4.d, z0.d
; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z2.d
-; CHECK-NEXT: movprfx z2, z1
-; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z5.d
+; CHECK-NEXT: lslr z5.d, p0/m, z5.d, z1.d
; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z3.d
; CHECK-NEXT: orr z0.d, z4.d, z0.d
-; CHECK-NEXT: orr z1.d, z2.d, z1.d
+; CHECK-NEXT: orr z1.d, z5.d, z1.d
; CHECK-NEXT: ret
%fshl = call <vscale x 4 x i64> @llvm.fshl.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b)
ret <vscale x 4 x i64> %fshl
diff --git a/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll b/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll
new file mode 100644
index 0000000..d074d9a
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/llvm.sincospi.error.ll
@@ -0,0 +1,13 @@
+; RUN: not llc -mtriple=aarch64-gnu-linux -filetype=null %s 2>&1 | FileCheck %s
+
+; CHECK: error: no libcall available for fsincospi
+define { float, float } @test_sincospi_f32(float %a) {
+ %result = call { float, float } @llvm.sincospi.f32(float %a)
+ ret { float, float } %result
+}
+
+; CHECK: error: no libcall available for fsincospi
+define { double, double } @test_sincospi_f64(double %a) {
+ %result = call { double, double } @llvm.sincospi.f64(double %a)
+ ret { double, double } %result
+}
diff --git a/llvm/test/CodeGen/AArch64/llvm.sincospi.ll b/llvm/test/CodeGen/AArch64/llvm.sincospi.ll
index d1d7d92..b386df0 100644
--- a/llvm/test/CodeGen/AArch64/llvm.sincospi.ll
+++ b/llvm/test/CodeGen/AArch64/llvm.sincospi.ll
@@ -1,268 +1,250 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=aarch64-gnu-linux < %s | FileCheck -check-prefixes=CHECK %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=arm64-apple-macosx10.9 < %s | FileCheck %s
-define { half, half } @test_sincospi_f16(half %a) {
+define { half, half } @test_sincospi_f16(half %a) #0 {
; CHECK-LABEL: test_sincospi_f16:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospif
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospif
; CHECK-NEXT: ldp s1, s0, [sp, #8]
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: fcvt h0, s0
; CHECK-NEXT: fcvt h1, s1
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { half, half } @llvm.sincospi.f16(half %a)
ret { half, half } %result
}
-define half @test_sincospi_f16_only_use_sin(half %a) {
+define half @test_sincospi_f16_only_use_sin(half %a) #0 {
; CHECK-LABEL: test_sincospi_f16_only_use_sin:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospif
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospif
; CHECK-NEXT: ldr s0, [sp, #12]
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: fcvt h0, s0
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { half, half } @llvm.sincospi.f16(half %a)
%result.0 = extractvalue { half, half } %result, 0
ret half %result.0
}
-define half @test_sincospi_f16_only_use_cos(half %a) {
+define half @test_sincospi_f16_only_use_cos(half %a) #0 {
; CHECK-LABEL: test_sincospi_f16_only_use_cos:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospif
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospif
; CHECK-NEXT: ldr s0, [sp, #8]
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: fcvt h0, s0
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { half, half } @llvm.sincospi.f16(half %a)
%result.1 = extractvalue { half, half } %result, 1
ret half %result.1
}
-define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) {
+define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 {
; CHECK-LABEL: test_sincospi_v2f16:
-; CHECK: // %bb.0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: mov h1, v0.h[1]
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: add x0, sp, #36
-; CHECK-NEXT: add x1, sp, #32
-; CHECK-NEXT: fcvt s0, h1
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ; kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT: mov h1, v0[1]
+; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill
; CHECK-NEXT: add x0, sp, #28
; CHECK-NEXT: add x1, sp, #24
+; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: fcvt s0, h1
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #20
+; CHECK-NEXT: add x1, sp, #16
; CHECK-NEXT: fcvt s0, h0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #36
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: mov h0, v0[2]
+; CHECK-NEXT: fcvt s0, h0
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
; CHECK-NEXT: add x0, sp, #44
; CHECK-NEXT: add x1, sp, #40
-; CHECK-NEXT: mov h0, v0.h[2]
-; CHECK-NEXT: fcvt s0, h0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: add x0, sp, #60
-; CHECK-NEXT: add x1, sp, #56
-; CHECK-NEXT: mov h0, v0.h[3]
+; CHECK-NEXT: mov h0, v0[3]
; CHECK-NEXT: fcvt s0, h0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldp s2, s0, [sp, #32]
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
-; CHECK-NEXT: ldp s3, s1, [sp, #24]
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldp s2, s0, [sp, #24]
+; CHECK-NEXT: ldp s3, s1, [sp, #16]
+; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
; CHECK-NEXT: fcvt h4, s0
; CHECK-NEXT: fcvt h2, s2
; CHECK-NEXT: fcvt h0, s1
; CHECK-NEXT: fcvt h1, s3
-; CHECK-NEXT: ldp s5, s3, [sp, #40]
+; CHECK-NEXT: ldp s5, s3, [sp, #32]
; CHECK-NEXT: fcvt h3, s3
-; CHECK-NEXT: mov v0.h[1], v4.h[0]
+; CHECK-NEXT: mov.h v0[1], v4[0]
; CHECK-NEXT: fcvt h4, s5
-; CHECK-NEXT: mov v1.h[1], v2.h[0]
-; CHECK-NEXT: ldp s5, s2, [sp, #56]
-; CHECK-NEXT: mov v0.h[2], v3.h[0]
+; CHECK-NEXT: mov.h v1[1], v2[0]
+; CHECK-NEXT: ldp s5, s2, [sp, #40]
+; CHECK-NEXT: mov.h v0[2], v3[0]
; CHECK-NEXT: fcvt h2, s2
; CHECK-NEXT: fcvt h3, s5
-; CHECK-NEXT: mov v1.h[2], v4.h[0]
-; CHECK-NEXT: mov v0.h[3], v2.h[0]
-; CHECK-NEXT: mov v1.h[3], v3.h[0]
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-NEXT: mov.h v1[2], v4[0]
+; CHECK-NEXT: mov.h v0[3], v2[0]
+; CHECK-NEXT: mov.h v1[3], v3[0]
+; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ; kill: def $d1 killed $d1 killed $q1
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
%result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a)
ret { <2 x half>, <2 x half> } %result
}
-define { float, float } @test_sincospi_f32(float %a) {
+define { float, float } @test_sincospi_f32(float %a) #0 {
; CHECK-LABEL: test_sincospi_f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospif
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospif
; CHECK-NEXT: ldp s1, s0, [sp, #8]
-; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { float, float } @llvm.sincospi.f32(float %a)
ret { float, float } %result
}
-define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) {
+define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 {
; CHECK-LABEL: test_sincospi_v3f32:
-; CHECK: // %bb.0:
-; CHECK-NEXT: sub sp, sp, #80
-; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
-; CHECK-NEXT: stp x22, x21, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w21, -24
-; CHECK-NEXT: .cfi_offset w22, -32
-; CHECK-NEXT: .cfi_offset w30, -48
-; CHECK-NEXT: add x0, sp, #20
-; CHECK-NEXT: add x1, sp, #16
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK: ; %bb.0:
+; CHECK-NEXT: sub sp, sp, #96
; CHECK-NEXT: add x0, sp, #28
; CHECK-NEXT: add x1, sp, #24
-; CHECK-NEXT: add x19, sp, #28
-; CHECK-NEXT: add x20, sp, #24
-; CHECK-NEXT: mov s0, v0.s[1]
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: stp x22, x21, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #80] ; 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill
+; CHECK-NEXT: ; kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #36
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: add x19, sp, #36
+; CHECK-NEXT: add x20, sp, #32
+; CHECK-NEXT: mov s0, v0[1]
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
; CHECK-NEXT: add x0, sp, #44
; CHECK-NEXT: add x1, sp, #40
; CHECK-NEXT: add x21, sp, #44
; CHECK-NEXT: add x22, sp, #40
-; CHECK-NEXT: mov s0, v0.s[2]
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldp s1, s0, [sp, #16]
-; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-NEXT: ld1 { v0.s }[1], [x19]
-; CHECK-NEXT: ld1 { v1.s }[1], [x20]
-; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
-; CHECK-NEXT: ld1 { v0.s }[2], [x21]
-; CHECK-NEXT: ld1 { v1.s }[2], [x22]
-; CHECK-NEXT: ldp x22, x21, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: add sp, sp, #80
+; CHECK-NEXT: mov s0, v0[2]
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldp s1, s0, [sp, #24]
+; CHECK-NEXT: ldp x29, x30, [sp, #80] ; 16-byte Folded Reload
+; CHECK-NEXT: ld1.s { v0 }[1], [x19]
+; CHECK-NEXT: ld1.s { v1 }[1], [x20]
+; CHECK-NEXT: ldp x20, x19, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT: ld1.s { v0 }[2], [x21]
+; CHECK-NEXT: ld1.s { v1 }[2], [x22]
+; CHECK-NEXT: ldp x22, x21, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
%result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a)
ret { <3 x float>, <3 x float> } %result
}
-define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) {
+define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 {
; CHECK-LABEL: test_sincospi_v2f32:
-; CHECK: // %bb.0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #64
-; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: add x0, sp, #44
-; CHECK-NEXT: add x1, sp, #40
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT: ; kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: add x0, sp, #28
; CHECK-NEXT: add x1, sp, #24
-; CHECK-NEXT: add x19, sp, #28
-; CHECK-NEXT: add x20, sp, #24
-; CHECK-NEXT: mov s0, v0.s[1]
-; CHECK-NEXT: bl sincospif
-; CHECK-NEXT: ldp s1, s0, [sp, #40]
-; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
-; CHECK-NEXT: ld1 { v0.s }[1], [x19]
-; CHECK-NEXT: ld1 { v1.s }[1], [x20]
-; CHECK-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: // kill: def $d1 killed $d1 killed $q1
+; CHECK-NEXT: stp x20, x19, [sp, #32] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill
+; CHECK-NEXT: ; kill: def $s0 killed $s0 killed $q0
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #20
+; CHECK-NEXT: add x1, sp, #16
+; CHECK-NEXT: add x19, sp, #20
+; CHECK-NEXT: add x20, sp, #16
+; CHECK-NEXT: mov s0, v0[1]
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldp s1, s0, [sp, #24]
+; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
+; CHECK-NEXT: ld1.s { v0 }[1], [x19]
+; CHECK-NEXT: ld1.s { v1 }[1], [x20]
+; CHECK-NEXT: ldp x20, x19, [sp, #32] ; 16-byte Folded Reload
+; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ; kill: def $d1 killed $d1 killed $q1
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
%result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a)
ret { <2 x float>, <2 x float> } %result
}
-define { double, double } @test_sincospi_f64(double %a) {
+define { double, double } @test_sincospi_f64(double %a) #0 {
; CHECK-LABEL: test_sincospi_f64:
-; CHECK: // %bb.0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #32
-; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset w30, -16
-; CHECK-NEXT: add x0, sp, #24
-; CHECK-NEXT: add x1, sp, #8
-; CHECK-NEXT: bl sincospi
-; CHECK-NEXT: ldr d0, [sp, #24]
-; CHECK-NEXT: ldr d1, [sp, #8]
-; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #8
+; CHECK-NEXT: mov x1, sp
+; CHECK-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: ldp d1, d0, [sp]
+; CHECK-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%result = call { double, double } @llvm.sincospi.f64(double %a)
ret { double, double } %result
}
-define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) {
+define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 {
; CHECK-LABEL: test_sincospi_v2f64:
-; CHECK: // %bb.0:
+; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #80
-; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
-; CHECK-NEXT: stp x20, x19, [sp, #64] // 16-byte Folded Spill
-; CHECK-NEXT: .cfi_def_cfa_offset 80
-; CHECK-NEXT: .cfi_offset w19, -8
-; CHECK-NEXT: .cfi_offset w20, -16
-; CHECK-NEXT: .cfi_offset w30, -32
-; CHECK-NEXT: add x0, sp, #56
-; CHECK-NEXT: add x1, sp, #40
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
-; CHECK-NEXT: bl sincospi
-; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
-; CHECK-NEXT: add x0, sp, #32
-; CHECK-NEXT: add x1, sp, #24
-; CHECK-NEXT: add x19, sp, #32
-; CHECK-NEXT: add x20, sp, #24
-; CHECK-NEXT: mov d0, v0.d[1]
-; CHECK-NEXT: bl sincospi
-; CHECK-NEXT: ldr d0, [sp, #56]
-; CHECK-NEXT: ldr d1, [sp, #40]
-; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
-; CHECK-NEXT: ld1 { v0.d }[1], [x19]
-; CHECK-NEXT: ld1 { v1.d }[1], [x20]
-; CHECK-NEXT: ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #40
+; CHECK-NEXT: add x1, sp, #32
+; CHECK-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
+; CHECK-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
+; CHECK-NEXT: str q0, [sp] ; 16-byte Folded Spill
+; CHECK-NEXT: ; kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: ldr q0, [sp] ; 16-byte Folded Reload
+; CHECK-NEXT: add x0, sp, #24
+; CHECK-NEXT: add x1, sp, #16
+; CHECK-NEXT: add x19, sp, #24
+; CHECK-NEXT: add x20, sp, #16
+; CHECK-NEXT: mov d0, v0[1]
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: ldp d1, d0, [sp, #32]
+; CHECK-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
+; CHECK-NEXT: ld1.d { v0 }[1], [x19]
+; CHECK-NEXT: ld1.d { v1 }[1], [x20]
+; CHECK-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %a)
ret { <2 x double>, <2 x double> } %result
}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
index 3230c9e..b3a7ec9 100644
--- a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
+++ b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
@@ -20,20 +20,17 @@ define i32 @sink_load_and_copy(i32 %n) {
; CHECK-NEXT: b.lt .LBB0_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
; CHECK-NEXT: adrp x8, A
-; CHECK-NEXT: mov w20, w19
-; CHECK-NEXT: ldr w21, [x8, :lo12:A]
+; CHECK-NEXT: mov w21, w19
+; CHECK-NEXT: ldr w20, [x8, :lo12:A]
; CHECK-NEXT: .LBB0_2: // %for.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: mov w0, w21
+; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: sdiv w20, w20, w0
-; CHECK-NEXT: subs w19, w19, #1
+; CHECK-NEXT: sdiv w19, w19, w0
+; CHECK-NEXT: subs w21, w21, #1
; CHECK-NEXT: b.ne .LBB0_2
-; CHECK-NEXT: b .LBB0_4
-; CHECK-NEXT: .LBB0_3:
-; CHECK-NEXT: mov w20, w19
-; CHECK-NEXT: .LBB0_4: // %for.cond.cleanup
-; CHECK-NEXT: mov w0, w20
+; CHECK-NEXT: .LBB0_3: // %for.cond.cleanup
+; CHECK-NEXT: mov w0, w19
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
@@ -82,15 +79,12 @@ define i32 @cant_sink_successive_call(i32 %n) {
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: sdiv w21, w21, w0
-; CHECK-NEXT: subs w19, w19, #1
+; CHECK-NEXT: sdiv w19, w19, w0
+; CHECK-NEXT: subs w21, w21, #1
; CHECK-NEXT: b.ne .LBB1_2
-; CHECK-NEXT: b .LBB1_4
-; CHECK-NEXT: .LBB1_3:
-; CHECK-NEXT: mov w21, w19
-; CHECK-NEXT: .LBB1_4: // %for.cond.cleanup
+; CHECK-NEXT: .LBB1_3: // %for.cond.cleanup
+; CHECK-NEXT: mov w0, w19
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov w0, w21
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
@@ -139,15 +133,12 @@ define i32 @cant_sink_successive_store(ptr nocapture readnone %store, i32 %n) {
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: bl _Z3usei
-; CHECK-NEXT: sdiv w21, w21, w0
-; CHECK-NEXT: subs w19, w19, #1
+; CHECK-NEXT: sdiv w19, w19, w0
+; CHECK-NEXT: subs w21, w21, #1
; CHECK-NEXT: b.ne .LBB2_2
-; CHECK-NEXT: b .LBB2_4
-; CHECK-NEXT: .LBB2_3:
-; CHECK-NEXT: mov w21, w19
-; CHECK-NEXT: .LBB2_4: // %for.cond.cleanup
+; CHECK-NEXT: .LBB2_3: // %for.cond.cleanup
+; CHECK-NEXT: mov w0, w19
; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
-; CHECK-NEXT: mov w0, w21
; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir b/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir
new file mode 100644
index 0000000..c397953
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-outliner-adrp-got-split.mir
@@ -0,0 +1,133 @@
+# RUN: llc -mtriple=aarch64--- -run-pass=machine-outliner -verify-machineinstrs %s -o - | FileCheck %s
+--- |
+
+ @x = common global i32 0, align 4
+
+ define i32 @adrp_add() #0 {
+ ret i32 0
+ }
+
+ define i32 @adrp_ldr() #0 {
+ ret i32 0
+ }
+
+ attributes #0 = { noinline noredzone }
+...
+---
+# Check that the main function body doesn't split the ADRP pair
+#
+# CHECK-LABEL: name: adrp_add
+# CHECK-DAG: bb.0:
+# CHECK: BL @OUTLINED_FUNCTION_[[F0:[0-9]+]]
+# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F2:[0-9]+]]
+# CHECK-NEXT: $lr = ORRXri $xzr, 1
+name: adrp_add
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0
+ $lr = ORRXri $xzr, 1
+ bb.1:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0
+ $lr = ORRXri $xzr, 1
+ bb.2:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0
+ $lr = ORRXri $xzr, 1
+ bb.3:
+ liveins: $lr
+ RET undef $lr
+...
+---
+# Check that the main function body doesn't split the ADRP pair
+#
+# CHECK-LABEL: name: adrp_ldr
+# CHECK-DAG: bb.0:
+# CHECK: BL @OUTLINED_FUNCTION_[[F0]]
+# CHECK-NEXT: BL @OUTLINED_FUNCTION_[[F1:[0-9]+]]
+# CHECK-NEXT: $lr = ORRXri $xzr, 1
+name: adrp_ldr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x
+ $lr = ORRXri $xzr, 1
+ bb.1:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x
+ $lr = ORRXri $xzr, 1
+ bb.2:
+ liveins: $lr
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $w12 = ORRWri $wzr, 1
+ $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+ $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x
+ $lr = ORRXri $xzr, 1
+ bb.3:
+ liveins: $lr
+ RET undef $lr
+
+# Check that no outlined function splits the ADRP pair apart
+#
+# CHECK: OUTLINED_FUNCTION_[[F0]]
+# CHECK-DAG: bb.0
+# CHECK: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: RET $lr
+
+# CHECK: OUTLINED_FUNCTION_[[F1]]
+# CHECK-DAG: bb.0
+# CHECK: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+# CHECK-NEXT: $x12 = LDRXui $x9, target-flags(aarch64-pageoff, aarch64-got) @x
+
+# CHECK: name: OUTLINED_FUNCTION_[[F2]]
+# CHECK-DAG: bb.0
+# CHECK: $w12 = ORRWri $wzr, 1
+# CHECK-NEXT: $x9 = ADRP target-flags(aarch64-page, aarch64-got) @x
+# CHECK-NEXT: $x12 = ADDXri $x9, target-flags(aarch64-pageoff, aarch64-got) @x, 0
diff --git a/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll b/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
index e7e1091..3380842 100644
--- a/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
+++ b/llvm/test/CodeGen/AArch64/machine-sink-kill-flags.ll
@@ -16,13 +16,12 @@ define i32 @test(ptr %ptr) {
; CHECK-NEXT: mov w9, wzr
; CHECK-NEXT: LBB0_1: ; %.thread
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: lsr w11, w9, #1
; CHECK-NEXT: sub w10, w9, #1
-; CHECK-NEXT: mov w9, w11
+; CHECK-NEXT: lsr w9, w9, #1
; CHECK-NEXT: tbnz w10, #0, LBB0_1
; CHECK-NEXT: ; %bb.2: ; %bb343
; CHECK-NEXT: and w9, w10, #0x1
-; CHECK-NEXT: mov w0, #-1
+; CHECK-NEXT: mov w0, #-1 ; =0xffffffff
; CHECK-NEXT: str w9, [x8]
; CHECK-NEXT: ret
bb:
diff --git a/llvm/test/CodeGen/AArch64/sbc.ll b/llvm/test/CodeGen/AArch64/sbc.ll
new file mode 100644
index 0000000..fff63c1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sbc.ll
@@ -0,0 +1,392 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s | FileCheck --check-prefixes=CHECK,CHECK-SD %s
+; RUN: llc < %s -global-isel | FileCheck --check-prefixes=CHECK,CHECK-GI %s
+
+target triple = "aarch64-none-linux-gnu"
+
+define i32 @test_basic_i32(i32 %a, i32 %b, i32 %x, i32 %y) {
+; CHECK-SD-LABEL: test_basic_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: sbc w0, w2, w3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_basic_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: sub w9, w2, w3
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: sub w0, w9, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i32 %a, %b
+ %carry = zext i1 %cc to i32
+ %sub = sub i32 %x, %y
+ %res = sub i32 %sub, %carry
+ ret i32 %res
+}
+
+define i64 @test_basic_i64(i64 %a, i64 %b, i64 %x, i64 %y) {
+; CHECK-SD-LABEL: test_basic_i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, x1
+; CHECK-SD-NEXT: sbc x0, x2, x3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_basic_i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp x0, x1
+; CHECK-GI-NEXT: sub x9, x2, x3
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: sub x0, x9, x8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i64 %a, %b
+ %carry = zext i1 %cc to i64
+ %sub = sub i64 %x, %y
+ %res = sub i64 %sub, %carry
+ ret i64 %res
+}
+
+define i64 @test_mixed_i32_i64(i32 %a, i32 %b, i64 %x, i64 %y) {
+; CHECK-SD-LABEL: test_mixed_i32_i64:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: sbc x0, x2, x3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_mixed_i32_i64:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: sub x9, x2, x3
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: sub x0, x9, x8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i32 %a, %b
+ %carry = zext i1 %cc to i64
+ %sub = sub i64 %x, %y
+ %res = sub i64 %sub, %carry
+ ret i64 %res
+}
+
+define i32 @test_mixed_i64_i32(i64 %a, i64 %b, i32 %x, i32 %y) {
+; CHECK-SD-LABEL: test_mixed_i64_i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp x0, x1
+; CHECK-SD-NEXT: sbc w0, w2, w3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_mixed_i64_i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp x0, x1
+; CHECK-GI-NEXT: sub w9, w2, w3
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: sub w0, w9, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i64 %a, %b
+ %carry = zext i1 %cc to i32
+ %sub = sub i32 %x, %y
+ %res = sub i32 %sub, %carry
+ ret i32 %res
+}
+
+define i32 @test_only_borrow(i32 %a, i32 %b, i32 %x) {
+; CHECK-SD-LABEL: test_only_borrow:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: sbc w0, w2, wzr
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_only_borrow:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: sub w0, w2, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i32 %a, %b
+ %carry = zext i1 %cc to i32
+ %res = sub i32 %x, %carry
+ ret i32 %res
+}
+
+define i32 @test_sext_add(i32 %a, i32 %b, i32 %x, i32 %y) {
+; CHECK-SD-LABEL: test_sext_add:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: sbc w0, w2, w3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_sext_add:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: sub w9, w2, w3
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: sbfx w8, w8, #0, #1
+; CHECK-GI-NEXT: add w0, w9, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i32 %a, %b
+ %carry = sext i1 %cc to i32
+ %sub = sub i32 %x, %y
+ %res = add i32 %sub, %carry
+ ret i32 %res
+}
+
+; FIXME: This case could be supported with reversed operands to the CMP.
+define i32 @test_ugt(i32 %a, i32 %b, i32 %x, i32 %y) {
+; CHECK-SD-LABEL: test_ugt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: sub w8, w2, w3
+; CHECK-SD-NEXT: cset w9, hi
+; CHECK-SD-NEXT: sub w0, w8, w9
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_ugt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: sub w9, w2, w3
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: sub w0, w9, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ugt i32 %a, %b
+ %carry = zext i1 %cc to i32
+ %sub = sub i32 %x, %y
+ %res = sub i32 %sub, %carry
+ ret i32 %res
+}
+
+define i32 @test_unsupported_cc_slt(i32 %a, i32 %b, i32 %x, i32 %y) {
+; CHECK-SD-LABEL: test_unsupported_cc_slt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: sub w8, w2, w3
+; CHECK-SD-NEXT: cset w9, lt
+; CHECK-SD-NEXT: sub w0, w8, w9
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsupported_cc_slt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: sub w9, w2, w3
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: sub w0, w9, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp slt i32 %a, %b
+ %carry = zext i1 %cc to i32
+ %sub = sub i32 %x, %y
+ %res = sub i32 %sub, %carry
+ ret i32 %res
+}
+
+define i32 @test_unsupported_cc_sgt(i32 %a, i32 %b, i32 %x, i32 %y) {
+; CHECK-SD-LABEL: test_unsupported_cc_sgt:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: sub w8, w2, w3
+; CHECK-SD-NEXT: cset w9, gt
+; CHECK-SD-NEXT: sub w0, w8, w9
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_unsupported_cc_sgt:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: sub w9, w2, w3
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: sub w0, w9, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp sgt i32 %a, %b
+ %carry = zext i1 %cc to i32
+ %sub = sub i32 %x, %y
+ %res = sub i32 %sub, %carry
+ ret i32 %res
+}
+
+define i32 @test_multiple_setcc_uses(i32 %a, i32 %b, i32 %x) {
+; CHECK-SD-LABEL: test_multiple_setcc_uses:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: sub w19, w2, w0
+; CHECK-SD-NEXT: bl use
+; CHECK-SD-NEXT: mov w0, w19
+; CHECK-SD-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_multiple_setcc_uses:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: mov w19, w2
+; CHECK-GI-NEXT: cset w20, lo
+; CHECK-GI-NEXT: mov w0, w20
+; CHECK-GI-NEXT: bl use
+; CHECK-GI-NEXT: sub w0, w19, w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i32 %a, %b
+ %carry = zext i1 %cc to i32
+ %res = sub i32 %x, %carry
+ tail call void @use(i1 %cc)
+ ret i32 %res
+}
+
+define i32 @test_multiple_carry_uses(i32 %a, i32 %b, i32 %x) {
+; CHECK-SD-LABEL: test_multiple_carry_uses:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: sub w19, w2, w0
+; CHECK-SD-NEXT: bl use
+; CHECK-SD-NEXT: mov w0, w19
+; CHECK-SD-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_multiple_carry_uses:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: mov w19, w2
+; CHECK-GI-NEXT: cset w20, lo
+; CHECK-GI-NEXT: mov w0, w20
+; CHECK-GI-NEXT: bl use
+; CHECK-GI-NEXT: sub w0, w19, w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i32 %a, %b
+ %carry = zext i1 %cc to i32
+ %res = sub i32 %x, %carry
+ tail call void @use(i32 %carry)
+ ret i32 %res
+}
+
+define i32 @test_multiple_sub_uses(i32 %a, i32 %b, i32 %x, i32 %y) {
+; CHECK-SD-LABEL: test_multiple_sub_uses:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT: .cfi_offset w19, -8
+; CHECK-SD-NEXT: .cfi_offset w30, -16
+; CHECK-SD-NEXT: sub w8, w2, w3
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: mov w0, w8
+; CHECK-SD-NEXT: sbc w19, w2, w3
+; CHECK-SD-NEXT: bl use
+; CHECK-SD-NEXT: mov w0, w19
+; CHECK-SD-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_multiple_sub_uses:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-GI-NEXT: .cfi_def_cfa_offset 32
+; CHECK-GI-NEXT: .cfi_offset w19, -8
+; CHECK-GI-NEXT: .cfi_offset w20, -16
+; CHECK-GI-NEXT: .cfi_offset w30, -32
+; CHECK-GI-NEXT: sub w19, w2, w3
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: mov w0, w19
+; CHECK-GI-NEXT: cset w20, lo
+; CHECK-GI-NEXT: bl use
+; CHECK-GI-NEXT: sub w0, w19, w20
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-GI-NEXT: ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i32 %a, %b
+ %carry = zext i1 %cc to i32
+ %sub = sub i32 %x, %y
+ %res = sub i32 %sub, %carry
+ tail call void @use(i32 %sub)
+ ret i32 %res
+}
+
+define i8 @test_i8(i8 %a, i8 %b, i8 %x, i8 %y) {
+; CHECK-SD-LABEL: test_i8:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: cmp w8, w1, uxtb
+; CHECK-SD-NEXT: sbc w0, w2, w3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_i8:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xff
+; CHECK-GI-NEXT: sub w9, w2, w3
+; CHECK-GI-NEXT: cmp w8, w1, uxtb
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: sub w0, w9, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i8 %a, %b
+ %carry = zext i1 %cc to i8
+ %sub = sub i8 %x, %y
+ %res = sub i8 %sub, %carry
+ ret i8 %res
+}
+
+define i16 @test_i16(i16 %a, i16 %b, i16 %x, i16 %y) {
+; CHECK-SD-LABEL: test_i16:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xffff
+; CHECK-SD-NEXT: cmp w8, w1, uxth
+; CHECK-SD-NEXT: sbc w0, w2, w3
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_i16:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xffff
+; CHECK-GI-NEXT: sub w9, w2, w3
+; CHECK-GI-NEXT: cmp w8, w1, uxth
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: sub w0, w9, w8
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult i16 %a, %b
+ %carry = zext i1 %cc to i16
+ %sub = sub i16 %x, %y
+ %res = sub i16 %sub, %carry
+ ret i16 %res
+}
+
+define <4 x i32> @test_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %x, <4 x i32> %y) {
+; CHECK-SD-LABEL: test_v4i32:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: sub v2.4s, v2.4s, v3.4s
+; CHECK-SD-NEXT: cmhi v0.4s, v1.4s, v0.4s
+; CHECK-SD-NEXT: add v0.4s, v2.4s, v0.4s
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: test_v4i32:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: movi v4.4s, #1
+; CHECK-GI-NEXT: cmhi v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: sub v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT: and v0.16b, v0.16b, v4.16b
+; CHECK-GI-NEXT: sub v0.4s, v1.4s, v0.4s
+; CHECK-GI-NEXT: ret
+ %cc = icmp ult <4 x i32> %a, %b
+ %carry = zext <4 x i1> %cc to <4 x i32>
+ %sub = sub <4 x i32> %x, %y
+ %res = sub <4 x i32> %sub, %carry
+ ret <4 x i32> %res
+}
+
+declare void @use()
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll b/llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll
index b947c94..72f6646 100644
--- a/llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll
+++ b/llvm/test/CodeGen/AArch64/sme-pstate-sm-changing-call-disable-coalescing.ll
@@ -151,12 +151,11 @@ define void @dont_coalesce_arg_f16(half %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
; CHECK-NEXT: str h0, [sp, #14] // 2-byte Folded Spill
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr h0, [sp, #14] // 2-byte Folded Reload
; CHECK-NEXT: bl use_f16
@@ -190,12 +189,11 @@ define void @dont_coalesce_arg_f32(float %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0
; CHECK-NEXT: str s0, [sp, #12] // 4-byte Folded Spill
+; CHECK-NEXT: // kill: def $s0 killed $s0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr s0, [sp, #12] // 4-byte Folded Reload
; CHECK-NEXT: bl use_f32
@@ -229,12 +227,11 @@ define void @dont_coalesce_arg_f64(double %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_f64
@@ -273,12 +270,11 @@ define void @dont_coalesce_arg_v1i8(<1 x i8> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v16i8
@@ -313,12 +309,11 @@ define void @dont_coalesce_arg_v1i16(<1 x i16> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v8i16
@@ -353,12 +348,11 @@ define void @dont_coalesce_arg_v1i32(<1 x i32> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v4i32
@@ -393,12 +387,11 @@ define void @dont_coalesce_arg_v1i64(<1 x i64> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v2i64
@@ -433,12 +426,11 @@ define void @dont_coalesce_arg_v1f16(<1 x half> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
; CHECK-NEXT: str h0, [sp, #14] // 2-byte Folded Spill
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr h0, [sp, #14] // 2-byte Folded Reload
; CHECK-NEXT: bl use_v8f16
@@ -513,12 +505,11 @@ define void @dont_coalesce_arg_v1f64(<1 x double> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: str d0, [sp, #8] // 8-byte Folded Spill
+; CHECK-NEXT: // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr d0, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT: bl use_v2f64
@@ -557,12 +548,11 @@ define void @dont_coalesce_arg_v16i8(<16 x i8> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v16i8
@@ -596,12 +586,11 @@ define void @dont_coalesce_arg_v8i16(<8 x i16> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v8i16
@@ -635,12 +624,11 @@ define void @dont_coalesce_arg_v4i32(<4 x i32> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v4i32
@@ -674,12 +662,11 @@ define void @dont_coalesce_arg_v2i64(<2 x i64> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v2i64
@@ -713,12 +700,11 @@ define void @dont_coalesce_arg_v8f16(<8 x half> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v8f16
@@ -752,12 +738,11 @@ define void @dont_coalesce_arg_v8bf16(<8 x bfloat> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v8bf16
@@ -791,12 +776,11 @@ define void @dont_coalesce_arg_v4f32(<4 x float> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v4f32
@@ -830,12 +814,11 @@ define void @dont_coalesce_arg_v2f64(<2 x double> %arg, ptr %ptr) #0 {
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
-; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: mov x19, x0
-; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
; CHECK-NEXT: smstop sm
; CHECK-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-NEXT: bl use_v2f64
diff --git a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
index f2163ad..df88f37 100644
--- a/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
+++ b/llvm/test/CodeGen/AArch64/sme-streaming-compatible-interface.ll
@@ -129,12 +129,11 @@ define <2 x double> @streaming_compatible_with_neon_vectors(<2 x double> %arg) "
; CHECK-NEXT: stp x30, x19, [sp, #80] // 16-byte Folded Spill
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: addvl sp, sp, #-1
+; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: mrs x19, SVCR
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: // kill: def $q0 killed $q0 def $z0
; CHECK-NEXT: str z0, [x8] // 16-byte Folded Spill
-; CHECK-NEXT: mrs x19, SVCR
-; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
-; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: tbz w19, #0, .LBB4_2
; CHECK-NEXT: // %bb.1:
; CHECK-NEXT: smstop sm
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-element.ll b/llvm/test/CodeGen/AArch64/sve-extract-element.ll
index c340df1..0cc2e04 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-element.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-element.ll
@@ -12,6 +12,26 @@ define i8 @test_lane0_16xi8(<vscale x 16 x i8> %a) #0 {
ret i8 %b
}
+define i32 @test_lane0_16xi8_zext_i32(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: test_lane0_16xi8_zext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umov w0, v0.b[0]
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 16 x i8> %a, i32 0
+ %c = zext i8 %b to i32
+ ret i32 %c
+}
+
+define i64 @test_lane0_16xi8_zext_i64(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: test_lane0_16xi8_zext_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umov w0, v0.b[0]
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 16 x i8> %a, i32 0
+ %c = zext i8 %b to i64
+ ret i64 %c
+}
+
define i8 @test_lane15_16xi8(<vscale x 16 x i8> %a) #0 {
; CHECK-LABEL: test_lane15_16xi8:
; CHECK: // %bb.0:
@@ -21,6 +41,26 @@ define i8 @test_lane15_16xi8(<vscale x 16 x i8> %a) #0 {
ret i8 %b
}
+define i32 @test_lane15_16xi8_zext_i32(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: test_lane15_16xi8_zext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umov w0, v0.b[15]
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 16 x i8> %a, i32 15
+ %c = zext i8 %b to i32
+ ret i32 %c
+}
+
+define i64 @test_lane15_16xi8_zext_i64(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: test_lane15_16xi8_zext_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umov w0, v0.b[15]
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 16 x i8> %a, i32 15
+ %c = zext i8 %b to i64
+ ret i64 %c
+}
+
define i8 @test_lane16_16xi8(<vscale x 16 x i8> %a) #0 {
; CHECK-LABEL: test_lane16_16xi8:
; CHECK: // %bb.0:
@@ -31,6 +71,32 @@ define i8 @test_lane16_16xi8(<vscale x 16 x i8> %a) #0 {
ret i8 %b
}
+; FIXME: FMOV+AND -> UMOV.
+define i32 @test_lane16_16xi8_zext_i32(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: test_lane16_16xi8_zext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.b, z0.b[16]
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: and w0, w8, #0xff
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 16 x i8> %a, i32 16
+ %c = zext i8 %b to i32
+ ret i32 %c
+}
+
+; FIXME: FMOV+AND -> UMOV.
+define i64 @test_lane16_16xi8_zext_i64(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: test_lane16_16xi8_zext_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.b, z0.b[16]
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: and x0, x8, #0xff
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 16 x i8> %a, i32 16
+ %c = zext i8 %b to i64
+ ret i64 %c
+}
+
define i16 @test_lane0_8xi16(<vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: test_lane0_8xi16:
; CHECK: // %bb.0:
@@ -40,6 +106,26 @@ define i16 @test_lane0_8xi16(<vscale x 8 x i16> %a) #0 {
ret i16 %b
}
+define i32 @test_lane0_8xi16_zext_i32(<vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: test_lane0_8xi16_zext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 8 x i16> %a, i32 0
+ %c = zext i16 %b to i32
+ ret i32 %c
+}
+
+define i64 @test_lane0_8xi16_zext_i64(<vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: test_lane0_8xi16_zext_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 8 x i16> %a, i32 0
+ %c = zext i16 %b to i64
+ ret i64 %c
+}
+
define i16 @test_lane7_8xi16(<vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: test_lane7_8xi16:
; CHECK: // %bb.0:
@@ -49,6 +135,26 @@ define i16 @test_lane7_8xi16(<vscale x 8 x i16> %a) #0 {
ret i16 %b
}
+define i32 @test_lane7_8xi16_zext_i32(<vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: test_lane7_8xi16_zext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umov w0, v0.h[7]
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 8 x i16> %a, i32 7
+ %c = zext i16 %b to i32
+ ret i32 %c
+}
+
+define i64 @test_lane7_8xi16_zext_i64(<vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: test_lane7_8xi16_zext_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: umov w0, v0.h[7]
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 8 x i16> %a, i32 7
+ %c = zext i16 %b to i64
+ ret i64 %c
+}
+
define i16 @test_lane8_8xi16(<vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: test_lane8_8xi16:
; CHECK: // %bb.0:
@@ -59,6 +165,32 @@ define i16 @test_lane8_8xi16(<vscale x 8 x i16> %a) #0 {
ret i16 %b
}
+; FIXME: FMOV+AND -> UMOV.
+define i32 @test_lane8_8xi16_zext_i32(<vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: test_lane8_8xi16_zext_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.h, z0.h[8]
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: and w0, w8, #0xffff
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 8 x i16> %a, i32 8
+ %c = zext i16 %b to i32
+ ret i32 %c
+}
+
+; FIXME: FMOV+AND -> UMOV.
+define i64 @test_lane8_8xi16_zext_i64(<vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: test_lane8_8xi16_zext_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov z0.h, z0.h[8]
+; CHECK-NEXT: fmov w8, s0
+; CHECK-NEXT: and x0, x8, #0xffff
+; CHECK-NEXT: ret
+ %b = extractelement <vscale x 8 x i16> %a, i32 8
+ %c = zext i16 %b to i64
+ ret i64 %c
+}
+
define i32 @test_lane0_4xi32(<vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: test_lane0_4xi32:
; CHECK: // %bb.0:
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
index 6c6a691..52a77cb 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-from-scalable-vector.ll
@@ -147,15 +147,15 @@ define <2 x float> @extract_v2f32_nxv16f32_2(<vscale x 16 x float> %arg) {
define <4 x i1> @extract_v4i1_nxv32i1_0(<vscale x 32 x i1> %arg) {
; CHECK-LABEL: extract_v4i1_nxv32i1_0:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
-; CHECK-NEXT: umov w8, v1.b[1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: umov w9, v1.b[2]
+; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT: umov w8, v0.b[1]
+; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.h[1], w8
+; CHECK-NEXT: umov w8, v1.b[2]
+; CHECK-NEXT: mov v0.h[2], w8
; CHECK-NEXT: umov w8, v1.b[3]
-; CHECK-NEXT: mov v0.h[2], w9
; CHECK-NEXT: mov v0.h[3], w8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%ext = call <4 x i1> @llvm.vector.extract.v4i1.nxv32i1(<vscale x 32 x i1> %arg, i64 0)
ret <4 x i1> %ext
diff --git a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
index e103137..7299410 100644
--- a/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-extract-fixed-vector.ll
@@ -248,15 +248,15 @@ define <2 x i1> @extract_v2i1_nxv2i1(<vscale x 2 x i1> %inmask) {
define <4 x i1> @extract_v4i1_nxv4i1(<vscale x 4 x i1> %inmask) {
; CHECK-LABEL: extract_v4i1_nxv4i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.s, p0/z, #1 // =0x1
-; CHECK-NEXT: mov w8, v1.s[1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov w9, v1.s[2]
+; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.h[1], w8
+; CHECK-NEXT: mov w8, v1.s[2]
+; CHECK-NEXT: mov v0.h[2], w8
; CHECK-NEXT: mov w8, v1.s[3]
-; CHECK-NEXT: mov v0.h[2], w9
; CHECK-NEXT: mov v0.h[3], w8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%mask = call <4 x i1> @llvm.vector.extract.v4i1.nxv4i1(<vscale x 4 x i1> %inmask, i64 0)
ret <4 x i1> %mask
@@ -265,23 +265,23 @@ define <4 x i1> @extract_v4i1_nxv4i1(<vscale x 4 x i1> %inmask) {
define <8 x i1> @extract_v8i1_nxv8i1(<vscale x 8 x i1> %inmask) {
; CHECK-LABEL: extract_v8i1_nxv8i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.h, p0/z, #1 // =0x1
-; CHECK-NEXT: umov w8, v1.h[1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: umov w9, v1.h[2]
+; CHECK-NEXT: mov z0.h, p0/z, #1 // =0x1
+; CHECK-NEXT: umov w8, v0.h[1]
+; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.b[1], w8
+; CHECK-NEXT: umov w8, v1.h[2]
+; CHECK-NEXT: mov v0.b[2], w8
; CHECK-NEXT: umov w8, v1.h[3]
-; CHECK-NEXT: mov v0.b[2], w9
-; CHECK-NEXT: umov w9, v1.h[4]
; CHECK-NEXT: mov v0.b[3], w8
+; CHECK-NEXT: umov w8, v1.h[4]
+; CHECK-NEXT: mov v0.b[4], w8
; CHECK-NEXT: umov w8, v1.h[5]
-; CHECK-NEXT: mov v0.b[4], w9
-; CHECK-NEXT: umov w9, v1.h[6]
; CHECK-NEXT: mov v0.b[5], w8
+; CHECK-NEXT: umov w8, v1.h[6]
+; CHECK-NEXT: mov v0.b[6], w8
; CHECK-NEXT: umov w8, v1.h[7]
-; CHECK-NEXT: mov v0.b[6], w9
; CHECK-NEXT: mov v0.b[7], w8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%mask = call <8 x i1> @llvm.vector.extract.v8i1.nxv8i1(<vscale x 8 x i1> %inmask, i64 0)
ret <8 x i1> %mask
@@ -292,9 +292,9 @@ define <8 x i1> @extract_v8i1_nxv8i1(<vscale x 8 x i1> %inmask) {
define <16 x i1> @extract_v16i1_nxv16i1(<vscale x 16 x i1> %inmask) {
; CHECK-LABEL: extract_v16i1_nxv16i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.b, p0/z, #1 // =0x1
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov v0.b[1], v1.b[1]
+; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: mov v0.b[1], v0.b[1]
; CHECK-NEXT: mov v0.b[2], v1.b[2]
; CHECK-NEXT: mov v0.b[3], v1.b[3]
; CHECK-NEXT: mov v0.b[4], v1.b[4]
@@ -309,6 +309,7 @@ define <16 x i1> @extract_v16i1_nxv16i1(<vscale x 16 x i1> %inmask) {
; CHECK-NEXT: mov v0.b[13], v1.b[13]
; CHECK-NEXT: mov v0.b[14], v1.b[14]
; CHECK-NEXT: mov v0.b[15], v1.b[15]
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT: ret
%mask = call <16 x i1> @llvm.vector.extract.v16i1.nxv16i1(<vscale x 16 x i1> %inmask, i64 0)
ret <16 x i1> %mask
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
index 6fbae7e..2dda03e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-arith.ll
@@ -55,10 +55,9 @@ define void @fadd_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: fadd z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fadd z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: fadd z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fadd_v32f16:
@@ -154,10 +153,9 @@ define void @fadd_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: fadd z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fadd z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: fadd z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fadd_v16f32:
@@ -253,10 +251,9 @@ define void @fadd_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fadd z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: fadd z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fadd_v8f64:
@@ -660,10 +657,9 @@ define void @fma_v32f16(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1h { z4.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1h { z5.h }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.h, p0/m, z1.h, z2.h
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.h, p0/m, z3.h, z4.h
+; VBITS_GE_256-NEXT: fmad z3.h, p0/m, z4.h, z5.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z3.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v32f16:
@@ -771,10 +767,9 @@ define void @fma_v16f32(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1w { z4.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1w { z5.s }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.s, p0/m, z1.s, z2.s
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.s, p0/m, z3.s, z4.s
+; VBITS_GE_256-NEXT: fmad z3.s, p0/m, z4.s, z5.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v16f32:
@@ -881,10 +876,9 @@ define void @fma_v8f64(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1d { z4.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1d { z5.d }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.d, p0/m, z1.d, z2.d
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.d, p0/m, z3.d, z4.d
+; VBITS_GE_256-NEXT: fmad z3.d, p0/m, z4.d, z5.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v8f64:
@@ -990,10 +984,9 @@ define void @fmul_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: fmul z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fmul z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: fmul z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmul_v32f16:
@@ -1089,10 +1082,9 @@ define void @fmul_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: fmul z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fmul z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: fmul z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmul_v16f32:
@@ -1188,10 +1180,9 @@ define void @fmul_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: fmul z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fmul z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: fmul z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmul_v8f64:
@@ -1827,10 +1818,9 @@ define void @fsub_v32f16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: fsub z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fsub z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: fsub z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fsub_v32f16:
@@ -1926,10 +1916,9 @@ define void @fsub_v16f32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: fsub z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fsub z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: fsub z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fsub_v16f32:
@@ -2025,10 +2014,9 @@ define void @fsub_v8f64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: fsub z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: fsub z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: fsub z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fsub_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
index e1ec5ee..633b429 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-fma.ll
@@ -64,10 +64,9 @@ define void @fma_v32f16(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1h { z4.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1h { z5.h }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.h, p0/m, z1.h, z2.h
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.h, p0/m, z3.h, z4.h
+; VBITS_GE_256-NEXT: fmad z3.h, p0/m, z4.h, z5.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z3.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v32f16:
@@ -181,10 +180,9 @@ define void @fma_v16f32(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1w { z4.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1w { z5.s }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.s, p0/m, z1.s, z2.s
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.s, p0/m, z3.s, z4.s
+; VBITS_GE_256-NEXT: fmad z3.s, p0/m, z4.s, z5.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v16f32:
@@ -297,10 +295,9 @@ define void @fma_v8f64(ptr %a, ptr %b, ptr %c) #0 {
; VBITS_GE_256-NEXT: ld1d { z4.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: ld1d { z5.d }, p0/z, [x2]
; VBITS_GE_256-NEXT: fmad z0.d, p0/m, z1.d, z2.d
-; VBITS_GE_256-NEXT: movprfx z1, z5
-; VBITS_GE_256-NEXT: fmla z1.d, p0/m, z3.d, z4.d
+; VBITS_GE_256-NEXT: fmad z3.d, p0/m, z4.d, z5.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fma_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
index de60dee..90a0499 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-minmax.ll
@@ -55,10 +55,9 @@ define void @fmaxnm_v32f16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmaxnm z1.h, p0/m, z1.h, z3.h
+; VBITS_EQ_256-NEXT: fmaxnm z2.h, p0/m, z2.h, z3.h
; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmaxnm_v32f16:
@@ -154,10 +153,9 @@ define void @fmaxnm_v16f32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmaxnm z1.s, p0/m, z1.s, z3.s
+; VBITS_EQ_256-NEXT: fmaxnm z2.s, p0/m, z2.s, z3.s
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmaxnm_v16f32:
@@ -253,10 +251,9 @@ define void @fmaxnm_v8f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmaxnm z1.d, p0/m, z1.d, z3.d
+; VBITS_EQ_256-NEXT: fmaxnm z2.d, p0/m, z2.d, z3.d
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmaxnm_v8f64:
@@ -356,10 +353,9 @@ define void @fminnm_v32f16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fminnm z1.h, p0/m, z1.h, z3.h
+; VBITS_EQ_256-NEXT: fminnm z2.h, p0/m, z2.h, z3.h
; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fminnm_v32f16:
@@ -455,10 +451,9 @@ define void @fminnm_v16f32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fminnm z0.s, p0/m, z0.s, z1.s
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fminnm z1.s, p0/m, z1.s, z3.s
+; VBITS_EQ_256-NEXT: fminnm z2.s, p0/m, z2.s, z3.s
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fminnm_v16f32:
@@ -554,10 +549,9 @@ define void @fminnm_v8f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fminnm z0.d, p0/m, z0.d, z1.d
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fminnm z1.d, p0/m, z1.d, z3.d
+; VBITS_EQ_256-NEXT: fminnm z2.d, p0/m, z2.d, z3.d
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fminnm_v8f64:
@@ -657,10 +651,9 @@ define void @fmax_v32f16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmax z0.h, p0/m, z0.h, z1.h
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmax z1.h, p0/m, z1.h, z3.h
+; VBITS_EQ_256-NEXT: fmax z2.h, p0/m, z2.h, z3.h
; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmax_v32f16:
@@ -756,10 +749,9 @@ define void @fmax_v16f32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmax z0.s, p0/m, z0.s, z1.s
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmax z1.s, p0/m, z1.s, z3.s
+; VBITS_EQ_256-NEXT: fmax z2.s, p0/m, z2.s, z3.s
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmax_v16f32:
@@ -855,10 +847,9 @@ define void @fmax_v8f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmax z0.d, p0/m, z0.d, z1.d
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmax z1.d, p0/m, z1.d, z3.d
+; VBITS_EQ_256-NEXT: fmax z2.d, p0/m, z2.d, z3.d
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmax_v8f64:
@@ -958,10 +949,9 @@ define void @fmin_v32f16(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmin z0.h, p0/m, z0.h, z1.h
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmin z1.h, p0/m, z1.h, z3.h
+; VBITS_EQ_256-NEXT: fmin z2.h, p0/m, z2.h, z3.h
; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmin_v32f16:
@@ -1057,10 +1047,9 @@ define void @fmin_v16f32(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmin z0.s, p0/m, z0.s, z1.s
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmin z1.s, p0/m, z1.s, z3.s
+; VBITS_EQ_256-NEXT: fmin z2.s, p0/m, z2.s, z3.s
; VBITS_EQ_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_EQ_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmin_v16f32:
@@ -1156,10 +1145,9 @@ define void @fmin_v8f64(ptr %a, ptr %b) #0 {
; VBITS_EQ_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_EQ_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_EQ_256-NEXT: fmin z0.d, p0/m, z0.d, z1.d
-; VBITS_EQ_256-NEXT: movprfx z1, z2
-; VBITS_EQ_256-NEXT: fmin z1.d, p0/m, z1.d, z3.d
+; VBITS_EQ_256-NEXT: fmin z2.d, p0/m, z2.d, z3.d
; VBITS_EQ_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_EQ_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_EQ_256-NEXT: ret
;
; VBITS_GE_512-LABEL: fmin_v8f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll
index 08a974f..a91b392 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-abd.ll
@@ -155,10 +155,9 @@ define void @sabd_v64i8_v64i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: sabd z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: sabd z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: sabd z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: sabd_v64i8_v64i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
index 58fca3a..7362395 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
@@ -456,10 +456,9 @@ define void @mul_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: mul z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: mul z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: mul z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: mul_v64i8:
@@ -555,10 +554,9 @@ define void @mul_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: mul z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: mul z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: mul z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: mul_v32i16:
@@ -654,10 +652,9 @@ define void @mul_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: mul z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: mul z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: mul z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: mul_v16i32:
@@ -759,10 +756,9 @@ define void @mul_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: mul z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: mul z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: mul_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
index 4926684..c563768 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-minmax.ll
@@ -55,10 +55,9 @@ define void @smax_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: smax z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smax z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: smax z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smax_v64i8:
@@ -154,10 +153,9 @@ define void @smax_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: smax z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smax z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: smax z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smax_v32i16:
@@ -253,10 +251,9 @@ define void @smax_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: smax z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smax z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: smax z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smax_v16i32:
@@ -360,10 +357,9 @@ define void @smax_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: smax z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smax z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: smax z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smax_v8i64:
@@ -463,10 +459,9 @@ define void @smin_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: smin z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smin z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: smin z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smin_v64i8:
@@ -562,10 +557,9 @@ define void @smin_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: smin z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smin z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: smin z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smin_v32i16:
@@ -661,10 +655,9 @@ define void @smin_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: smin z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smin z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: smin z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smin_v16i32:
@@ -768,10 +761,9 @@ define void @smin_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: smin z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smin z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: smin z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smin_v8i64:
@@ -871,10 +863,9 @@ define void @umax_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: umax z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umax z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: umax z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umax_v64i8:
@@ -970,10 +961,9 @@ define void @umax_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: umax z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umax z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: umax z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umax_v32i16:
@@ -1069,10 +1059,9 @@ define void @umax_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: umax z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umax z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: umax z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umax_v16i32:
@@ -1176,10 +1165,9 @@ define void @umax_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: umax z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umax z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: umax z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umax_v8i64:
@@ -1279,10 +1267,9 @@ define void @umin_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: umin z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umin z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: umin z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umin_v64i8:
@@ -1378,10 +1365,9 @@ define void @umin_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: umin z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umin z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: umin z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umin_v32i16:
@@ -1477,10 +1463,9 @@ define void @umin_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: umin z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umin z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: umin z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umin_v16i32:
@@ -1584,10 +1569,9 @@ define void @umin_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: umin z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umin z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: umin z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umin_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
index 41cce35..dfbc237 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-mulh.ll
@@ -78,10 +78,9 @@ define void @smulh_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: smulh z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smulh z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: smulh z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smulh_v64i8:
@@ -209,10 +208,9 @@ define void @smulh_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: smulh z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smulh z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: smulh z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smulh_v32i16:
@@ -340,10 +338,9 @@ define void @smulh_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: smulh z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smulh z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: smulh z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smulh_v16i32:
@@ -471,10 +468,9 @@ define void @smulh_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: smulh z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: smulh z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: smulh z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: smulh_v8i64:
@@ -607,10 +603,9 @@ define void @umulh_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: umulh z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umulh z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: umulh z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umulh_v64i8:
@@ -739,10 +734,9 @@ define void @umulh_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: umulh z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umulh z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: umulh z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umulh_v32i16:
@@ -870,10 +864,9 @@ define void @umulh_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: umulh z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umulh z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: umulh z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umulh_v16i32:
@@ -1001,10 +994,9 @@ define void @umulh_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: umulh z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: umulh z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: umulh z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: umulh_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
index 27be844..14204e9 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-rem.ll
@@ -616,10 +616,9 @@ define void @srem_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: movprfx z5, z3
; VBITS_GE_256-NEXT: sdiv z5.s, p0/m, z5.s, z4.s
; VBITS_GE_256-NEXT: mls z0.s, p0/m, z2.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z3
-; VBITS_GE_256-NEXT: mls z1.s, p0/m, z5.s, z4.s
+; VBITS_GE_256-NEXT: mls z3.s, p0/m, z5.s, z4.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: srem_v16i32:
@@ -744,11 +743,10 @@ define void @srem_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_128-NEXT: movprfx z18, z16
; VBITS_GE_128-NEXT: sdiv z18.d, p0/m, z18.d, z17.d
; VBITS_GE_128-NEXT: msb z0.d, p0/m, z4.d, z1.d
-; VBITS_GE_128-NEXT: movprfx z1, z2
-; VBITS_GE_128-NEXT: mls z1.d, p0/m, z19.d, z3.d
+; VBITS_GE_128-NEXT: mls z2.d, p0/m, z19.d, z3.d
; VBITS_GE_128-NEXT: mls z16.d, p0/m, z18.d, z17.d
; VBITS_GE_128-NEXT: mls z5.d, p0/m, z7.d, z6.d
-; VBITS_GE_128-NEXT: stp q0, q1, [x0]
+; VBITS_GE_128-NEXT: stp q0, q2, [x0]
; VBITS_GE_128-NEXT: stp q16, q5, [x0, #32]
; VBITS_GE_128-NEXT: ret
;
@@ -765,10 +763,9 @@ define void @srem_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: movprfx z5, z3
; VBITS_GE_256-NEXT: sdiv z5.d, p0/m, z5.d, z4.d
; VBITS_GE_256-NEXT: mls z0.d, p0/m, z2.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z3
-; VBITS_GE_256-NEXT: mls z1.d, p0/m, z5.d, z4.d
+; VBITS_GE_256-NEXT: mls z3.d, p0/m, z5.d, z4.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: srem_v8i64:
@@ -1434,10 +1431,9 @@ define void @urem_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: movprfx z5, z3
; VBITS_GE_256-NEXT: udiv z5.s, p0/m, z5.s, z4.s
; VBITS_GE_256-NEXT: mls z0.s, p0/m, z2.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z3
-; VBITS_GE_256-NEXT: mls z1.s, p0/m, z5.s, z4.s
+; VBITS_GE_256-NEXT: mls z3.s, p0/m, z5.s, z4.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z3.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: urem_v16i32:
@@ -1562,11 +1558,10 @@ define void @urem_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_128-NEXT: movprfx z18, z16
; VBITS_GE_128-NEXT: udiv z18.d, p0/m, z18.d, z17.d
; VBITS_GE_128-NEXT: msb z0.d, p0/m, z4.d, z1.d
-; VBITS_GE_128-NEXT: movprfx z1, z2
-; VBITS_GE_128-NEXT: mls z1.d, p0/m, z19.d, z3.d
+; VBITS_GE_128-NEXT: mls z2.d, p0/m, z19.d, z3.d
; VBITS_GE_128-NEXT: mls z16.d, p0/m, z18.d, z17.d
; VBITS_GE_128-NEXT: mls z5.d, p0/m, z7.d, z6.d
-; VBITS_GE_128-NEXT: stp q0, q1, [x0]
+; VBITS_GE_128-NEXT: stp q0, q2, [x0]
; VBITS_GE_128-NEXT: stp q16, q5, [x0, #32]
; VBITS_GE_128-NEXT: ret
;
@@ -1583,10 +1578,9 @@ define void @urem_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: movprfx z5, z3
; VBITS_GE_256-NEXT: udiv z5.d, p0/m, z5.d, z4.d
; VBITS_GE_256-NEXT: mls z0.d, p0/m, z2.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z3
-; VBITS_GE_256-NEXT: mls z1.d, p0/m, z5.d, z4.d
+; VBITS_GE_256-NEXT: mls z3.d, p0/m, z5.d, z4.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z3.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: urem_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
index 0fa8c8f5..a8afa90 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-shifts.ll
@@ -57,10 +57,9 @@ define void @ashr_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: asr z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: asr z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: asr z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: ashr_v64i8:
@@ -158,10 +157,9 @@ define void @ashr_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: asr z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: asr z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: asr z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: ashr_v32i16:
@@ -259,10 +257,9 @@ define void @ashr_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: asr z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: asr z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: asr z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: ashr_v16i32:
@@ -360,10 +357,9 @@ define void @ashr_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: asr z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: asr z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: asr z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: ashr_v8i64:
@@ -465,10 +461,9 @@ define void @lshr_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsr z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsr z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: lsr z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: lshr_v64i8:
@@ -566,10 +561,9 @@ define void @lshr_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsr z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsr z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: lsr z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: lshr_v32i16:
@@ -667,10 +661,9 @@ define void @lshr_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsr z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsr z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: lsr z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: lshr_v16i32:
@@ -768,10 +761,9 @@ define void @lshr_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsr z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsr z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: lsr z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: lshr_v8i64:
@@ -871,10 +863,9 @@ define void @shl_v64i8(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsl z0.b, p0/m, z0.b, z1.b
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsl z1.b, p0/m, z1.b, z3.b
+; VBITS_GE_256-NEXT: lsl z2.b, p0/m, z2.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
-; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
+; VBITS_GE_256-NEXT: st1b { z2.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: shl_v64i8:
@@ -970,10 +961,9 @@ define void @shl_v32i16(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1h { z2.h }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1h { z3.h }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsl z0.h, p0/m, z0.h, z1.h
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsl z1.h, p0/m, z1.h, z3.h
+; VBITS_GE_256-NEXT: lsl z2.h, p0/m, z2.h, z3.h
; VBITS_GE_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_GE_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_GE_256-NEXT: st1h { z2.h }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: shl_v32i16:
@@ -1069,10 +1059,9 @@ define void @shl_v16i32(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1w { z2.s }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1w { z3.s }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsl z0.s, p0/m, z0.s, z1.s
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsl z1.s, p0/m, z1.s, z3.s
+; VBITS_GE_256-NEXT: lsl z2.s, p0/m, z2.s, z3.s
; VBITS_GE_256-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; VBITS_GE_256-NEXT: st1w { z1.s }, p0, [x0]
+; VBITS_GE_256-NEXT: st1w { z2.s }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: shl_v16i32:
@@ -1168,10 +1157,9 @@ define void @shl_v8i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-NEXT: ld1d { z2.d }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1d { z3.d }, p0/z, [x1]
; VBITS_GE_256-NEXT: lsl z0.d, p0/m, z0.d, z1.d
-; VBITS_GE_256-NEXT: movprfx z1, z2
-; VBITS_GE_256-NEXT: lsl z1.d, p0/m, z1.d, z3.d
+; VBITS_GE_256-NEXT: lsl z2.d, p0/m, z2.d, z3.d
; VBITS_GE_256-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
-; VBITS_GE_256-NEXT: st1d { z1.d }, p0, [x0]
+; VBITS_GE_256-NEXT: st1d { z2.d }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: shl_v8i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
index 41e4a38..8e807cd 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-reshuffle.ll
@@ -8,15 +8,15 @@ target triple = "aarch64-unknown-linux-gnu"
define <4 x i1> @reshuffle_v4i1_nxv4i1(<vscale x 4 x i1> %a) #0 {
; CHECK-LABEL: reshuffle_v4i1_nxv4i1:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z1.s, p0/z, #1 // =0x1
-; CHECK-NEXT: mov w8, v1.s[1]
-; CHECK-NEXT: mov v0.16b, v1.16b
-; CHECK-NEXT: mov w9, v1.s[2]
+; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
+; CHECK-NEXT: mov w8, v0.s[1]
+; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.h[1], w8
+; CHECK-NEXT: mov w8, v1.s[2]
+; CHECK-NEXT: mov v0.h[2], w8
; CHECK-NEXT: mov w8, v1.s[3]
-; CHECK-NEXT: mov v0.h[2], w9
; CHECK-NEXT: mov v0.h[3], w8
-; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: ret
%el0 = extractelement <vscale x 4 x i1> %a, i32 0
%el1 = extractelement <vscale x 4 x i1> %a, i32 1
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index ba4a3a2..bd8f432 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -28,53 +28,53 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
; CHECK: // %bb.0:
; CHECK-NEXT: tbnz w1, #0, .LBB1_2
; CHECK-NEXT: // %bb.1: // %vector.body
+; CHECK-NEXT: movi v2.2d, #0000000000000000
; CHECK-NEXT: movi v0.2d, #0000000000000000
-; CHECK-NEXT: movi v1.2d, #0000000000000000
+; CHECK-NEXT: movi v3.2d, #0000000000000000
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: umov w8, v0.b[8]
-; CHECK-NEXT: mov v1.b[1], v0.b[1]
-; CHECK-NEXT: movprfx z3, z0
-; CHECK-NEXT: ext z3.b, z3.b, z0.b, #16
+; CHECK-NEXT: umov w8, v2.b[8]
+; CHECK-NEXT: mov v0.b[1], v2.b[1]
+; CHECK-NEXT: ext z3.b, z3.b, z3.b, #16
; CHECK-NEXT: ext v4.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: mov v1.b[2], v0.b[2]
-; CHECK-NEXT: mov v2.b[1], v0.b[9]
-; CHECK-NEXT: mov v1.b[3], v0.b[3]
-; CHECK-NEXT: mov v2.b[2], v0.b[10]
-; CHECK-NEXT: mov v1.b[4], v0.b[4]
-; CHECK-NEXT: mov v2.b[3], v0.b[11]
-; CHECK-NEXT: mov v1.b[5], v0.b[5]
-; CHECK-NEXT: mov v2.b[4], v0.b[12]
-; CHECK-NEXT: mov v1.b[6], v0.b[6]
-; CHECK-NEXT: mov v2.b[5], v0.b[13]
-; CHECK-NEXT: mov v1.b[7], v0.b[7]
-; CHECK-NEXT: mov v2.b[6], v0.b[14]
-; CHECK-NEXT: uunpklo z1.h, z1.b
-; CHECK-NEXT: mov v2.b[7], v0.b[15]
-; CHECK-NEXT: uunpklo z0.h, z3.b
+; CHECK-NEXT: fmov s1, w8
+; CHECK-NEXT: mov v0.b[2], v2.b[2]
+; CHECK-NEXT: mov v1.b[1], v2.b[9]
+; CHECK-NEXT: mov v0.b[3], v2.b[3]
+; CHECK-NEXT: mov v1.b[2], v2.b[10]
+; CHECK-NEXT: mov v0.b[4], v2.b[4]
+; CHECK-NEXT: mov v1.b[3], v2.b[11]
+; CHECK-NEXT: mov v0.b[5], v2.b[5]
+; CHECK-NEXT: mov v1.b[4], v2.b[12]
+; CHECK-NEXT: mov v0.b[6], v2.b[6]
+; CHECK-NEXT: mov v1.b[5], v2.b[13]
+; CHECK-NEXT: mov v0.b[7], v2.b[7]
+; CHECK-NEXT: mov v1.b[6], v2.b[14]
+; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: mov v1.b[7], v2.b[15]
+; CHECK-NEXT: uunpklo z2.h, z3.b
; CHECK-NEXT: uunpklo z3.h, z4.b
-; CHECK-NEXT: uunpklo z1.s, z1.h
-; CHECK-NEXT: uunpklo z2.h, z2.b
; CHECK-NEXT: uunpklo z0.s, z0.h
-; CHECK-NEXT: uunpklo z3.s, z3.h
-; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: uunpklo z1.h, z1.b
; CHECK-NEXT: uunpklo z2.s, z2.h
+; CHECK-NEXT: uunpklo z3.s, z3.h
; CHECK-NEXT: lsl z0.s, z0.s, #31
+; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: lsl z2.s, z2.s, #31
; CHECK-NEXT: lsl z3.s, z3.s, #31
-; CHECK-NEXT: asr z1.s, z1.s, #31
; CHECK-NEXT: asr z0.s, z0.s, #31
+; CHECK-NEXT: asr z2.s, z2.s, #31
; CHECK-NEXT: asr z3.s, z3.s, #31
-; CHECK-NEXT: lsl z2.s, z2.s, #31
-; CHECK-NEXT: cmpne p3.s, p0/z, z1.s, #0
-; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
-; CHECK-NEXT: movi v0.2d, #0000000000000000
+; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: cmpne p3.s, p0/z, z0.s, #0
+; CHECK-NEXT: cmpne p1.s, p0/z, z2.s, #0
+; CHECK-NEXT: movi v2.2d, #0000000000000000
; CHECK-NEXT: cmpne p2.s, p0/z, z3.s, #0
-; CHECK-NEXT: asr z2.s, z2.s, #31
-; CHECK-NEXT: cmpne p0.s, p0/z, z2.s, #0
-; CHECK-NEXT: st1w { z0.s }, p1, [x0, #2, mul vl]
-; CHECK-NEXT: st1w { z0.s }, p2, [x0, #3, mul vl]
-; CHECK-NEXT: st1w { z0.s }, p3, [x0]
-; CHECK-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: asr z1.s, z1.s, #31
+; CHECK-NEXT: cmpne p0.s, p0/z, z1.s, #0
+; CHECK-NEXT: st1w { z2.s }, p1, [x0, #2, mul vl]
+; CHECK-NEXT: st1w { z2.s }, p2, [x0, #3, mul vl]
+; CHECK-NEXT: st1w { z2.s }, p3, [x0]
+; CHECK-NEXT: st1w { z2.s }, p0, [x0, #1, mul vl]
; CHECK-NEXT: .LBB1_2: // %exit
; CHECK-NEXT: ret
%broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll b/llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll
index 124f81e..39fe92a 100644
--- a/llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll
+++ b/llvm/test/CodeGen/AArch64/sve-ptest-removal-sink.ll
@@ -11,12 +11,12 @@ define void @test_sink_ptrue_into_ptest(i32 %n) {
; CHECK-NEXT: whilelt p0.s, wzr, w0
; CHECK-NEXT: b.pl .LBB0_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
-; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: cntw x9
+; CHECK-NEXT: mov w9, wzr
+; CHECK-NEXT: cntw x8
; CHECK-NEXT: .LBB0_2: // %for.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: whilelt p0.s, w8, w0
-; CHECK-NEXT: add w8, w8, w9
+; CHECK-NEXT: whilelt p0.s, w9, w0
+; CHECK-NEXT: add w9, w9, w8
; CHECK-NEXT: b.mi .LBB0_2
; CHECK-NEXT: .LBB0_3: // %exit
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
index f2c882c..20c06f0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-arith.ll
@@ -193,9 +193,8 @@ define void @fadd_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v16f16:
@@ -397,9 +396,8 @@ define void @fadd_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v8f32:
@@ -479,9 +477,8 @@ define void @fadd_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v4f64:
@@ -703,9 +700,8 @@ define void @fdiv_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fdivr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fdiv z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fdiv z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fdiv_v16f16:
@@ -907,9 +903,8 @@ define void @fdiv_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fdivr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fdiv z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fdiv z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fdiv_v8f32:
@@ -989,9 +984,8 @@ define void @fdiv_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fdivr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fdiv z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fdiv z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fdiv_v4f64:
@@ -1253,9 +1247,8 @@ define void @fma_v16f16(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.h, p0/m, z2.h, z1.h
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.h, p0/m, z3.h, z4.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.h, p0/m, z4.h, z5.h
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v16f16:
@@ -1501,9 +1494,8 @@ define void @fma_v8f32(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.s, p0/m, z3.s, z4.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.s, p0/m, z4.s, z5.s
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v8f32:
@@ -1595,9 +1587,8 @@ define void @fma_v4f64(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.d, p0/m, z3.d, z4.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.d, p0/m, z4.d, z5.d
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v4f64:
@@ -1824,9 +1815,8 @@ define void @fmul_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmul z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmul z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmul z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmul_v16f16:
@@ -2028,9 +2018,8 @@ define void @fmul_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmul z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmul z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmul z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmul_v8f32:
@@ -2110,9 +2099,8 @@ define void @fmul_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmul z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmul z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmul_v4f64:
@@ -3152,9 +3140,8 @@ define void @fsub_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fsubr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fsub z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fsub z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fsub_v16f16:
@@ -3356,9 +3343,8 @@ define void @fsub_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fsubr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fsub z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fsub z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fsub_v8f32:
@@ -3438,9 +3424,8 @@ define void @fsub_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fsubr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fsub z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fsub z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fsub_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
index 680cb4f..dbacd77 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-fma.ll
@@ -208,9 +208,8 @@ define void @fma_v16f16(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.h, p0/m, z2.h, z1.h
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.h, p0/m, z3.h, z4.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.h, p0/m, z4.h, z5.h
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v16f16:
@@ -526,9 +525,8 @@ define void @fma_v8f32(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.s, p0/m, z2.s, z1.s
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.s, p0/m, z3.s, z4.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.s, p0/m, z4.s, z5.s
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v8f32:
@@ -642,9 +640,8 @@ define void @fma_v4f64(ptr %a, ptr %b, ptr %c) {
; CHECK-NEXT: ldp q1, q5, [x2]
; CHECK-NEXT: ldp q2, q3, [x0]
; CHECK-NEXT: fmad z0.d, p0/m, z2.d, z1.d
-; CHECK-NEXT: movprfx z1, z5
-; CHECK-NEXT: fmla z1.d, p0/m, z3.d, z4.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmad z3.d, p0/m, z4.d, z5.d
+; CHECK-NEXT: stp q0, q3, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fma_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
index 84aea18..e53d6a9 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-minmax.ll
@@ -143,9 +143,8 @@ define void @fmaxnm_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmaxnm z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmaxnm z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmaxnm z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmaxnm_v16f16:
@@ -347,9 +346,8 @@ define void @fmaxnm_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmaxnm z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmaxnm z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmaxnm z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmaxnm_v8f32:
@@ -448,9 +446,8 @@ define void @fmaxnm_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmaxnm z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmaxnm z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmaxnm z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmaxnm_v4f64:
@@ -622,9 +619,8 @@ define void @fminnm_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fminnm z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fminnm z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fminnm z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fminnm_v16f16:
@@ -826,9 +822,8 @@ define void @fminnm_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fminnm z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fminnm z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fminnm z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fminnm_v8f32:
@@ -927,9 +922,8 @@ define void @fminnm_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fminnm z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fminnm z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fminnm z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fminnm_v4f64:
@@ -1101,9 +1095,8 @@ define void @fmax_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmax z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmax z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmax z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmax_v16f16:
@@ -1305,9 +1298,8 @@ define void @fmax_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmax z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmax z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmax z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmax_v8f32:
@@ -1406,9 +1398,8 @@ define void @fmax_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmax z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmax z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmax z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmax_v4f64:
@@ -1580,9 +1571,8 @@ define void @fmin_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmin z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmin z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmin z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmin_v16f16:
@@ -1784,9 +1774,8 @@ define void @fmin_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmin z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmin z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmin z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmin_v8f32:
@@ -1885,9 +1874,8 @@ define void @fmin_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fmin z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fmin z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fmin z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fmin_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
index 4360f3a..02b5469 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-arith.ll
@@ -975,9 +975,8 @@ define void @mul_v32i8(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.b, vl16
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: mul z0.b, p0/m, z0.b, z1.b
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: mul z1.b, p0/m, z1.b, z3.b
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: mul z2.b, p0/m, z2.b, z3.b
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: mul_v32i8:
@@ -1286,9 +1285,8 @@ define void @mul_v16i16(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.h, vl8
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: mul z0.h, p0/m, z0.h, z1.h
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: mul z1.h, p0/m, z1.h, z3.h
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: mul z2.h, p0/m, z2.h, z3.h
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: mul_v16i16:
@@ -1467,9 +1465,8 @@ define void @mul_v8i32(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.s, vl4
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: mul z0.s, p0/m, z0.s, z1.s
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: mul z1.s, p0/m, z1.s, z3.s
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: mul z2.s, p0/m, z2.s, z3.s
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: mul_v8i32:
@@ -1599,9 +1596,8 @@ define void @mul_v4i64(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.d, vl2
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: mul z0.d, p0/m, z0.d, z1.d
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: mul z1.d, p0/m, z1.d, z3.d
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: mul z2.d, p0/m, z2.d, z3.d
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: mul_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
index 1fdcd4f..8e1d61b 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-div.ll
@@ -779,9 +779,8 @@ define void @sdiv_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: sdivr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: sdiv z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: sdiv z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: sdiv_v8i32:
@@ -886,9 +885,8 @@ define void @sdiv_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: sdivr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: sdiv z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: sdiv z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: sdiv_v4i64:
@@ -1693,9 +1691,8 @@ define void @udiv_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: udivr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: udiv z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: udiv z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: udiv_v8i32:
@@ -1800,9 +1797,8 @@ define void @udiv_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: udivr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: udiv z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: udiv z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: udiv_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
index 1bca7dd..d858d81 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-minmax.ll
@@ -179,9 +179,8 @@ define void @smax_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smax z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smax z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smax z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smax_v32i8:
@@ -473,9 +472,8 @@ define void @smax_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smax z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smax z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smax z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smax_v16i16:
@@ -651,9 +649,8 @@ define void @smax_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smax z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smax z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smax z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smax_v8i32:
@@ -771,9 +768,8 @@ define void @smax_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smax z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smax z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smax z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smax_v4i64:
@@ -985,9 +981,8 @@ define void @smin_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smin z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smin z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smin z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smin_v32i8:
@@ -1279,9 +1274,8 @@ define void @smin_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smin z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smin z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smin z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smin_v16i16:
@@ -1457,9 +1451,8 @@ define void @smin_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smin z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smin z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smin z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smin_v8i32:
@@ -1577,9 +1570,8 @@ define void @smin_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: smin z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: smin z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: smin z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: smin_v4i64:
@@ -1791,9 +1783,8 @@ define void @umax_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umax z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umax z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umax z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umax_v32i8:
@@ -2085,9 +2076,8 @@ define void @umax_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umax z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umax z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umax z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umax_v16i16:
@@ -2263,9 +2253,8 @@ define void @umax_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umax z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umax z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umax z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umax_v8i32:
@@ -2383,9 +2372,8 @@ define void @umax_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umax z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umax z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umax z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umax_v4i64:
@@ -2597,9 +2585,8 @@ define void @umin_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umin z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umin z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umin z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umin_v32i8:
@@ -2891,9 +2878,8 @@ define void @umin_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umin z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umin z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umin z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umin_v16i16:
@@ -3069,9 +3055,8 @@ define void @umin_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umin z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umin z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umin z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umin_v8i32:
@@ -3189,9 +3174,8 @@ define void @umin_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: umin z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: umin z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: umin z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: umin_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
index 0c97eed..85b7b4d 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-mulh.ll
@@ -294,9 +294,8 @@ define void @smulh_v32i8(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.b, vl16
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: smulh z0.b, p0/m, z0.b, z1.b
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: smulh z1.b, p0/m, z1.b, z3.b
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: smulh z2.b, p0/m, z2.b, z3.b
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: smulh_v32i8:
@@ -755,9 +754,8 @@ define void @smulh_v16i16(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.h, vl8
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: smulh z0.h, p0/m, z0.h, z1.h
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: smulh z1.h, p0/m, z1.h, z3.h
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: smulh z2.h, p0/m, z2.h, z3.h
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: smulh_v16i16:
@@ -1001,9 +999,8 @@ define void @smulh_v8i32(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.s, vl4
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: smulh z0.s, p0/m, z0.s, z1.s
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: smulh z1.s, p0/m, z1.s, z3.s
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: smulh z2.s, p0/m, z2.s, z3.s
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: smulh_v8i32:
@@ -1159,9 +1156,8 @@ define void @smulh_v4i64(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.d, vl2
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: smulh z0.d, p0/m, z0.d, z1.d
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: smulh z1.d, p0/m, z1.d, z3.d
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: smulh z2.d, p0/m, z2.d, z3.d
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: smulh_v4i64:
@@ -1494,9 +1490,8 @@ define void @umulh_v32i8(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.b, vl16
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: umulh z0.b, p0/m, z0.b, z1.b
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: umulh z1.b, p0/m, z1.b, z3.b
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: umulh z2.b, p0/m, z2.b, z3.b
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: umulh_v32i8:
@@ -1954,9 +1949,8 @@ define void @umulh_v16i16(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.h, vl8
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: umulh z0.h, p0/m, z0.h, z1.h
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: umulh z1.h, p0/m, z1.h, z3.h
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: umulh z2.h, p0/m, z2.h, z3.h
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: umulh_v16i16:
@@ -2200,9 +2194,8 @@ define void @umulh_v8i32(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.s, vl4
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: umulh z0.s, p0/m, z0.s, z1.s
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: umulh z1.s, p0/m, z1.s, z3.s
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: umulh z2.s, p0/m, z2.s, z3.s
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: umulh_v8i32:
@@ -2358,9 +2351,8 @@ define void @umulh_v4i64(ptr %a, ptr %b) {
; SVE-NEXT: ptrue p0.d, vl2
; SVE-NEXT: ldp q1, q2, [x0]
; SVE-NEXT: umulh z0.d, p0/m, z0.d, z1.d
-; SVE-NEXT: movprfx z1, z2
-; SVE-NEXT: umulh z1.d, p0/m, z1.d, z3.d
-; SVE-NEXT: stp q0, q1, [x0]
+; SVE-NEXT: umulh z2.d, p0/m, z2.d, z3.d
+; SVE-NEXT: stp q0, q2, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: umulh_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
index 372f6a0..c4b6c0e 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-rem.ll
@@ -883,9 +883,8 @@ define void @srem_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: movprfx z5, z2
; CHECK-NEXT: sdiv z5.s, p0/m, z5.s, z3.s
; CHECK-NEXT: msb z0.s, p0/m, z4.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mls z1.s, p0/m, z5.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: mls z2.s, p0/m, z5.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: srem_v8i32:
@@ -1013,9 +1012,8 @@ define void @srem_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: movprfx z5, z2
; CHECK-NEXT: sdiv z5.d, p0/m, z5.d, z3.d
; CHECK-NEXT: msb z0.d, p0/m, z4.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mls z1.d, p0/m, z5.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: mls z2.d, p0/m, z5.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: srem_v4i64:
@@ -1933,9 +1931,8 @@ define void @urem_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: movprfx z5, z2
; CHECK-NEXT: udiv z5.s, p0/m, z5.s, z3.s
; CHECK-NEXT: msb z0.s, p0/m, z4.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mls z1.s, p0/m, z5.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: mls z2.s, p0/m, z5.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: urem_v8i32:
@@ -2063,9 +2060,8 @@ define void @urem_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: movprfx z5, z2
; CHECK-NEXT: udiv z5.d, p0/m, z5.d, z3.d
; CHECK-NEXT: msb z0.d, p0/m, z4.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: mls z1.d, p0/m, z5.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: mls z2.d, p0/m, z5.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: urem_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
index d0f9921..4cf8945 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
@@ -195,9 +195,8 @@ define void @ashr_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: asrr z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: asr z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: asr z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ashr_v32i8:
@@ -476,9 +475,8 @@ define void @ashr_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: asrr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: asr z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: asr z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ashr_v16i16:
@@ -632,9 +630,8 @@ define void @ashr_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: asrr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: asr z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: asr z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ashr_v8i32:
@@ -739,9 +736,8 @@ define void @ashr_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: asrr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: asr z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: asr z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: ashr_v4i64:
@@ -965,9 +961,8 @@ define void @lshr_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lsrr z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsr z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsr z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: lshr_v32i8:
@@ -1246,9 +1241,8 @@ define void @lshr_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lsrr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsr z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsr z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: lshr_v16i16:
@@ -1402,9 +1396,8 @@ define void @lshr_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lsrr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsr z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsr z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: lshr_v8i32:
@@ -1509,9 +1502,8 @@ define void @lshr_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lsrr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsr z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsr z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: lshr_v4i64:
@@ -1764,9 +1756,8 @@ define void @shl_v32i8(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.b, vl16
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lslr z0.b, p0/m, z0.b, z1.b
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsl z1.b, p0/m, z1.b, z3.b
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsl z2.b, p0/m, z2.b, z3.b
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: shl_v32i8:
@@ -2014,9 +2005,8 @@ define void @shl_v16i16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lslr z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsl z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsl z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: shl_v16i16:
@@ -2170,9 +2160,8 @@ define void @shl_v8i32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lslr z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsl z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsl z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: shl_v8i32:
@@ -2277,9 +2266,8 @@ define void @shl_v4i64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: lslr z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: lsl z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: shl_v4i64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
index 74e5fe7..e9b2f53 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
@@ -954,9 +954,8 @@ define void @fadd_v16f16(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.h, vl8
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.h, p0/m, z1.h, z3.h
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.h, p0/m, z2.h, z3.h
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v16f16:
@@ -1170,9 +1169,8 @@ define void @fadd_v8f32(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.s, vl4
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.s, p0/m, z1.s, z3.s
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.s, p0/m, z2.s, z3.s
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v8f32:
@@ -1258,9 +1256,8 @@ define void @fadd_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: ptrue p0.d, vl2
; CHECK-NEXT: ldp q1, q2, [x0]
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: movprfx z1, z2
-; CHECK-NEXT: fadd z1.d, p0/m, z1.d, z3.d
-; CHECK-NEXT: stp q0, q1, [x0]
+; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: stp q0, q2, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: fadd_v4f64:
diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
index e0e88c4..e78671a 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-permute-zip-uzp-trn.ll
@@ -526,10 +526,9 @@ define void @zip_v4f64(ptr %a, ptr %b) {
; CHECK-NEXT: zip1 z5.d, z0.d, z2.d
; CHECK-NEXT: trn2 z1.d, z1.d, z3.d
; CHECK-NEXT: trn2 z0.d, z0.d, z2.d
-; CHECK-NEXT: movprfx z2, z4
-; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z5.d
+; CHECK-NEXT: fadd z4.d, p0/m, z4.d, z5.d
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: stp q2, q0, [x0]
+; CHECK-NEXT: stp q4, q0, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip_v4f64:
@@ -2159,10 +2158,9 @@ define void @zip_vscale2_4(ptr %a, ptr %b) {
; CHECK-NEXT: zip1 z5.d, z0.d, z2.d
; CHECK-NEXT: trn2 z1.d, z1.d, z3.d
; CHECK-NEXT: trn2 z0.d, z0.d, z2.d
-; CHECK-NEXT: movprfx z2, z4
-; CHECK-NEXT: fadd z2.d, p0/m, z2.d, z5.d
+; CHECK-NEXT: fadd z4.d, p0/m, z4.d, z5.d
; CHECK-NEXT: fadd z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT: stp q2, q0, [x0]
+; CHECK-NEXT: stp q4, q0, [x0]
; CHECK-NEXT: ret
;
; NONEON-NOSVE-LABEL: zip_vscale2_4:
diff --git a/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll b/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll
index 6af2606..0472d5c 100644
--- a/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vecreduce-dot.ll
@@ -36,10 +36,9 @@ define i32 @test(<vscale x 32 x i8> %bin.rdx, <vscale x 32 x i8> %bin.rdx2) {
; CHECK-NEXT: mla z0.s, p0/m, z25.s, z24.s
; CHECK-NEXT: mad z2.s, p0/m, z6.s, z4.s
; CHECK-NEXT: mad z1.s, p0/m, z3.s, z26.s
-; CHECK-NEXT: movprfx z3, z5
-; CHECK-NEXT: mla z3.s, p0/m, z28.s, z7.s
+; CHECK-NEXT: mla z5.s, p0/m, z28.s, z7.s
; CHECK-NEXT: add z0.s, z2.s, z0.s
-; CHECK-NEXT: add z1.s, z3.s, z1.s
+; CHECK-NEXT: add z1.s, z5.s, z1.s
; CHECK-NEXT: add z0.s, z1.s, z0.s
; CHECK-NEXT: uaddv d0, p0, z0.s
; CHECK-NEXT: fmov w0, s0
diff --git a/llvm/test/CodeGen/AArch64/sve2-xar.ll b/llvm/test/CodeGen/AArch64/sve2-xar.ll
index 888e94d..8f6f451 100644
--- a/llvm/test/CodeGen/AArch64/sve2-xar.ll
+++ b/llvm/test/CodeGen/AArch64/sve2-xar.ll
@@ -157,10 +157,9 @@ define <vscale x 2 x i64> @xar_nxv2i64_l_neg1(<vscale x 2 x i64> %x, <vscale x 2
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: and z3.d, z3.d, #0x3f
; CHECK-NEXT: and z2.d, z2.d, #0x3f
-; CHECK-NEXT: movprfx z1, z0
-; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT: lslr z3.d, p0/m, z3.d, z0.d
; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z2.d
-; CHECK-NEXT: orr z0.d, z1.d, z0.d
+; CHECK-NEXT: orr z0.d, z3.d, z0.d
; CHECK-NEXT: ret
%a = xor <vscale x 2 x i64> %x, %y
%b = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %z)
diff --git a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
index 74a717f..935189d 100644
--- a/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
+++ b/llvm/test/CodeGen/AArch64/zext-to-tbl.ll
@@ -2835,11 +2835,11 @@ define i32 @test_widening_instr_mull(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: .LBB24_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ld1 { v0.16b }, [x1], #16
-; CHECK-BE-NEXT: add x8, x0, #16
+; CHECK-BE-NEXT: mov x8, x0
; CHECK-BE-NEXT: ld1 { v1.8h }, [x0]
-; CHECK-BE-NEXT: ld1 { v3.8h }, [x8]
-; CHECK-BE-NEXT: add x9, x0, #48
-; CHECK-BE-NEXT: add x10, x0, #32
+; CHECK-BE-NEXT: add x0, x0, #16
+; CHECK-BE-NEXT: add x9, x8, #48
+; CHECK-BE-NEXT: ld1 { v3.8h }, [x0]
; CHECK-BE-NEXT: subs w2, w2, #1
; CHECK-BE-NEXT: ushll v2.8h, v0.8b, #0
; CHECK-BE-NEXT: ushll2 v0.8h, v0.16b, #0
@@ -2847,11 +2847,11 @@ define i32 @test_widening_instr_mull(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: umull2 v5.4s, v3.8h, v0.8h
; CHECK-BE-NEXT: umull v0.4s, v3.4h, v0.4h
; CHECK-BE-NEXT: umull2 v1.4s, v1.8h, v2.8h
-; CHECK-BE-NEXT: st1 { v4.4s }, [x0]
-; CHECK-BE-NEXT: mov x0, x8
+; CHECK-BE-NEXT: st1 { v4.4s }, [x8]
+; CHECK-BE-NEXT: add x8, x8, #32
; CHECK-BE-NEXT: st1 { v5.4s }, [x9]
-; CHECK-BE-NEXT: st1 { v0.4s }, [x10]
-; CHECK-BE-NEXT: st1 { v1.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v0.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v1.4s }, [x0]
; CHECK-BE-NEXT: b.ne .LBB24_1
; CHECK-BE-NEXT: // %bb.2: // %exit
; CHECK-BE-NEXT: mov w0, wzr
@@ -2950,26 +2950,26 @@ define i32 @test_widening_instr_mull_64(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: .LBB25_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ld1 { v4.16b }, [x0]
-; CHECK-BE-NEXT: add x9, x1, #48
-; CHECK-BE-NEXT: add x8, x1, #32
-; CHECK-BE-NEXT: ld1 { v18.4s }, [x9]
+; CHECK-BE-NEXT: add x10, x1, #48
; CHECK-BE-NEXT: ld1 { v16.4s }, [x1]
+; CHECK-BE-NEXT: add x9, x1, #32
+; CHECK-BE-NEXT: ld1 { v18.4s }, [x10]
; CHECK-BE-NEXT: add x1, x1, #16
-; CHECK-BE-NEXT: ld1 { v20.4s }, [x8]
+; CHECK-BE-NEXT: ld1 { v20.4s }, [x9]
; CHECK-BE-NEXT: ld1 { v22.4s }, [x1]
-; CHECK-BE-NEXT: add x8, x0, #96
+; CHECK-BE-NEXT: add x9, x0, #96
; CHECK-BE-NEXT: tbl v5.16b, { v4.16b }, v3.16b
; CHECK-BE-NEXT: tbl v6.16b, { v4.16b }, v2.16b
; CHECK-BE-NEXT: tbl v7.16b, { v4.16b }, v1.16b
; CHECK-BE-NEXT: tbl v4.16b, { v4.16b }, v0.16b
; CHECK-BE-NEXT: ext v24.16b, v18.16b, v18.16b, #8
-; CHECK-BE-NEXT: add x9, x0, #32
+; CHECK-BE-NEXT: mov x8, x0
; CHECK-BE-NEXT: ext v25.16b, v20.16b, v20.16b, #8
-; CHECK-BE-NEXT: add x10, x0, #16
+; CHECK-BE-NEXT: add x10, x0, #32
; CHECK-BE-NEXT: subs w2, w2, #1
; CHECK-BE-NEXT: ext v17.16b, v5.16b, v5.16b, #8
-; CHECK-BE-NEXT: ext v19.16b, v6.16b, v6.16b, #8
; CHECK-BE-NEXT: rev32 v5.8b, v5.8b
+; CHECK-BE-NEXT: ext v19.16b, v6.16b, v6.16b, #8
; CHECK-BE-NEXT: rev32 v21.8b, v7.8b
; CHECK-BE-NEXT: rev32 v23.8b, v4.8b
; CHECK-BE-NEXT: ext v7.16b, v7.16b, v7.16b, #8
@@ -2986,22 +2986,22 @@ define i32 @test_widening_instr_mull_64(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: rev32 v4.8b, v4.8b
; CHECK-BE-NEXT: umull v17.2d, v17.2s, v24.2s
; CHECK-BE-NEXT: umull v19.2d, v19.2s, v25.2s
-; CHECK-BE-NEXT: st1 { v5.2d }, [x8]
+; CHECK-BE-NEXT: st1 { v5.2d }, [x9]
; CHECK-BE-NEXT: umull v5.2d, v6.2s, v20.2s
; CHECK-BE-NEXT: umull v6.2d, v7.2s, v21.2s
-; CHECK-BE-NEXT: add x8, x0, #112
+; CHECK-BE-NEXT: add x9, x0, #112
; CHECK-BE-NEXT: umull v4.2d, v4.2s, v16.2s
-; CHECK-BE-NEXT: st1 { v18.2d }, [x9]
-; CHECK-BE-NEXT: add x9, x0, #80
+; CHECK-BE-NEXT: st1 { v18.2d }, [x10]
+; CHECK-BE-NEXT: add x10, x0, #80
; CHECK-BE-NEXT: st1 { v22.2d }, [x0]
-; CHECK-BE-NEXT: st1 { v17.2d }, [x8]
-; CHECK-BE-NEXT: add x8, x0, #64
-; CHECK-BE-NEXT: st1 { v19.2d }, [x9]
-; CHECK-BE-NEXT: add x9, x0, #48
-; CHECK-BE-NEXT: mov x0, x8
-; CHECK-BE-NEXT: st1 { v5.2d }, [x8]
+; CHECK-BE-NEXT: add x0, x0, #64
+; CHECK-BE-NEXT: st1 { v17.2d }, [x9]
+; CHECK-BE-NEXT: add x9, x8, #48
+; CHECK-BE-NEXT: add x8, x8, #16
+; CHECK-BE-NEXT: st1 { v19.2d }, [x10]
+; CHECK-BE-NEXT: st1 { v5.2d }, [x0]
; CHECK-BE-NEXT: st1 { v6.2d }, [x9]
-; CHECK-BE-NEXT: st1 { v4.2d }, [x10]
+; CHECK-BE-NEXT: st1 { v4.2d }, [x8]
; CHECK-BE-NEXT: b.ne .LBB25_1
; CHECK-BE-NEXT: // %bb.2: // %exit
; CHECK-BE-NEXT: mov w0, wzr
@@ -3093,13 +3093,14 @@ define i32 @test_widening_instr_mull_2(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: .LBB26_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ld1 { v4.16b }, [x1], #16
-; CHECK-BE-NEXT: add x8, x0, #32
+; CHECK-BE-NEXT: mov x8, x0
+; CHECK-BE-NEXT: add x9, x0, #32
; CHECK-BE-NEXT: ld1 { v16.4s }, [x0]
-; CHECK-BE-NEXT: add x9, x0, #48
-; CHECK-BE-NEXT: add x10, x0, #16
-; CHECK-BE-NEXT: ld1 { v17.4s }, [x8]
-; CHECK-BE-NEXT: ld1 { v18.4s }, [x9]
-; CHECK-BE-NEXT: ld1 { v19.4s }, [x10]
+; CHECK-BE-NEXT: add x10, x0, #48
+; CHECK-BE-NEXT: add x0, x0, #16
+; CHECK-BE-NEXT: ld1 { v17.4s }, [x9]
+; CHECK-BE-NEXT: ld1 { v18.4s }, [x10]
+; CHECK-BE-NEXT: ld1 { v19.4s }, [x0]
; CHECK-BE-NEXT: subs w2, w2, #1
; CHECK-BE-NEXT: tbl v5.16b, { v4.16b }, v1.16b
; CHECK-BE-NEXT: tbl v6.16b, { v4.16b }, v3.16b
@@ -3113,11 +3114,10 @@ define i32 @test_widening_instr_mull_2(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: mul v6.4s, v17.4s, v6.4s
; CHECK-BE-NEXT: mul v7.4s, v18.4s, v7.4s
; CHECK-BE-NEXT: mul v4.4s, v19.4s, v4.4s
-; CHECK-BE-NEXT: st1 { v5.4s }, [x0]
-; CHECK-BE-NEXT: mov x0, x10
-; CHECK-BE-NEXT: st1 { v6.4s }, [x8]
-; CHECK-BE-NEXT: st1 { v7.4s }, [x9]
-; CHECK-BE-NEXT: st1 { v4.4s }, [x10]
+; CHECK-BE-NEXT: st1 { v5.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v6.4s }, [x9]
+; CHECK-BE-NEXT: st1 { v7.4s }, [x10]
+; CHECK-BE-NEXT: st1 { v4.4s }, [x0]
; CHECK-BE-NEXT: b.ne .LBB26_1
; CHECK-BE-NEXT: // %bb.2: // %exit
; CHECK-BE-NEXT: mov w0, wzr
@@ -3246,11 +3246,11 @@ define i32 @mul_zext_16i8_sext_16i16(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: .LBB28_1: // %loop
; CHECK-BE-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-BE-NEXT: ld1 { v0.16b }, [x1], #16
-; CHECK-BE-NEXT: add x8, x0, #16
+; CHECK-BE-NEXT: mov x8, x0
; CHECK-BE-NEXT: ld1 { v1.8h }, [x0]
-; CHECK-BE-NEXT: ld1 { v3.8h }, [x8]
-; CHECK-BE-NEXT: add x9, x0, #48
-; CHECK-BE-NEXT: add x10, x0, #32
+; CHECK-BE-NEXT: add x0, x0, #16
+; CHECK-BE-NEXT: add x9, x8, #48
+; CHECK-BE-NEXT: ld1 { v3.8h }, [x0]
; CHECK-BE-NEXT: subs w2, w2, #1
; CHECK-BE-NEXT: ushll v2.8h, v0.8b, #0
; CHECK-BE-NEXT: ushll2 v0.8h, v0.16b, #0
@@ -3258,11 +3258,11 @@ define i32 @mul_zext_16i8_sext_16i16(ptr %p1, ptr %p2, i32 %h) {
; CHECK-BE-NEXT: smull2 v5.4s, v3.8h, v0.8h
; CHECK-BE-NEXT: smull v0.4s, v3.4h, v0.4h
; CHECK-BE-NEXT: smull2 v1.4s, v1.8h, v2.8h
-; CHECK-BE-NEXT: st1 { v4.4s }, [x0]
-; CHECK-BE-NEXT: mov x0, x8
+; CHECK-BE-NEXT: st1 { v4.4s }, [x8]
+; CHECK-BE-NEXT: add x8, x8, #32
; CHECK-BE-NEXT: st1 { v5.4s }, [x9]
-; CHECK-BE-NEXT: st1 { v0.4s }, [x10]
-; CHECK-BE-NEXT: st1 { v1.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v0.4s }, [x8]
+; CHECK-BE-NEXT: st1 { v1.4s }, [x0]
; CHECK-BE-NEXT: b.ne .LBB28_1
; CHECK-BE-NEXT: // %bb.2: // %exit
; CHECK-BE-NEXT: mov w0, wzr
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
index dd01112..c1e6b4f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-used-outside-loop.ll
@@ -21,14 +21,14 @@ define void @divergent_i1_phi_used_outside_loop(float %val, float %pre.cond.val,
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_cvt_f32_u32_e32 v1, s6
; GFX10-NEXT: s_mov_b32 s8, exec_lo
+; GFX10-NEXT: s_mov_b32 s9, s5
; GFX10-NEXT: s_add_i32 s6, s6, 1
-; GFX10-NEXT: s_xor_b32 s8, s5, s8
+; GFX10-NEXT: s_xor_b32 s5, s5, s8
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v1, v0
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 s7, s7, exec_lo
-; GFX10-NEXT: s_and_b32 s9, exec_lo, s5
-; GFX10-NEXT: s_mov_b32 s5, s8
-; GFX10-NEXT: s_or_b32 s7, s7, s9
+; GFX10-NEXT: s_and_b32 s8, exec_lo, s9
+; GFX10-NEXT: s_or_b32 s7, s7, s8
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB0_1
; GFX10-NEXT: ; %bb.2: ; %exit
@@ -240,11 +240,11 @@ define void @divergent_i1_xor_used_outside_loop_larger_loop_body(i32 %num.elts,
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
; GFX10-NEXT: s_mov_b32 s6, exec_lo
-; GFX10-NEXT: s_mov_b32 s8, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_and_saveexec_b32 s7, vcc_lo
; GFX10-NEXT: s_cbranch_execz .LBB4_6
; GFX10-NEXT: ; %bb.1: ; %loop.start.preheader
-; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: ; implicit-def: $sgpr10
; GFX10-NEXT: ; implicit-def: $sgpr11
; GFX10-NEXT: ; implicit-def: $sgpr9
@@ -345,8 +345,8 @@ define void @divergent_i1_icmp_used_outside_loop(i32 %v0, i32 %v1, ptr addrspace
; GFX10-LABEL: divergent_i1_icmp_used_outside_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: s_mov_b32 s6, 0
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: s_mov_b32 s6, 0
; GFX10-NEXT: ; implicit-def: $sgpr7
; GFX10-NEXT: s_branch .LBB5_2
; GFX10-NEXT: .LBB5_1: ; %Flow
@@ -457,8 +457,8 @@ define amdgpu_ps void @divergent_i1_freeze_used_outside_loop(i32 %n, ptr addrspa
; GFX10-LABEL: divergent_i1_freeze_used_outside_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_mov_b32 s1, exec_lo
-; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: ; implicit-def: $sgpr4
; GFX10-NEXT: ; implicit-def: $sgpr3
; GFX10-NEXT: s_branch .LBB6_2
@@ -534,8 +534,8 @@ exit:
define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %a.break) {
; GFX10-LABEL: loop_with_1break:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr6
; GFX10-NEXT: ; implicit-def: $sgpr7
; GFX10-NEXT: ; implicit-def: $sgpr5
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
index fd08ab8..484536b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-structurizer.ll
@@ -106,8 +106,8 @@ exit:
define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, ptr addrspace(1) %a) {
; GFX10-LABEL: loop_with_1break:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr5
; GFX10-NEXT: s_branch .LBB2_2
; GFX10-NEXT: .LBB2_1: ; %Flow
@@ -180,8 +180,8 @@ exit:
define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b) {
; GFX10-LABEL: loop_with_2breaks:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr5
; GFX10-NEXT: s_branch .LBB3_3
; GFX10-NEXT: .LBB3_1: ; %Flow3
@@ -278,8 +278,8 @@ exit:
define amdgpu_cs void @loop_with_3breaks(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c) {
; GFX10-LABEL: loop_with_3breaks:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr5
; GFX10-NEXT: s_branch .LBB4_4
; GFX10-NEXT: .LBB4_1: ; %Flow5
@@ -404,8 +404,8 @@ exit:
define amdgpu_cs void @loop_with_div_break_with_body(ptr addrspace(1) %x, ptr addrspace(1) %a, ptr addrspace(1) %a.break) {
; GFX10-LABEL: loop_with_div_break_with_body:
; GFX10: ; %bb.0: ; %entry
-; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: s_mov_b32 s0, 0
+; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: ; implicit-def: $sgpr6
; GFX10-NEXT: ; implicit-def: $sgpr7
; GFX10-NEXT: ; implicit-def: $sgpr5
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
index d13d6a1..69baf61 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-temporal-divergent-i1.ll
@@ -101,8 +101,8 @@ define amdgpu_cs void @loop_with_1break(ptr addrspace(1) %x, i32 %x.size, ptr ad
; GFX10-LABEL: loop_with_1break:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: v_mov_b32_e32 v3, 0
-; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: s_mov_b32 s4, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: ; implicit-def: $sgpr10
; GFX10-NEXT: ; implicit-def: $sgpr9
; GFX10-NEXT: s_branch .LBB2_3
@@ -197,14 +197,14 @@ define void @nested_loops_temporal_divergence_inner(float %pre.cond.val, i32 %n.
; GFX10-LABEL: nested_loops_temporal_divergence_inner:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0
; GFX10-NEXT: s_mov_b32 s6, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: .LBB3_1: ; %OuterHeader
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB3_2 Depth 2
; GFX10-NEXT: s_ashr_i32 s7, s6, 31
-; GFX10-NEXT: s_mov_b32 s4, s8
+; GFX10-NEXT: s_mov_b32 s4, s5
; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2
; GFX10-NEXT: ; implicit-def: $sgpr9
; GFX10-NEXT: v_mov_b32_e32 v6, s10
@@ -239,13 +239,13 @@ define void @nested_loops_temporal_divergence_inner(float %pre.cond.val, i32 %n.
; GFX10-NEXT: s_add_i32 s6, s6, 1
; GFX10-NEXT: v_add_co_u32 v6, s4, v4, v6
; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, v5, v7, s4
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
; GFX10-NEXT: flat_store_byte v[6:7], v0
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_cbranch_execnz .LBB3_1
; GFX10-NEXT: ; %bb.4: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -288,14 +288,14 @@ define void @nested_loops_temporal_divergence_outer(float %pre.cond.val, i32 %n.
; GFX10-LABEL: nested_loops_temporal_divergence_outer:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0
; GFX10-NEXT: s_mov_b32 s6, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: .LBB4_1: ; %OuterHeader
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB4_2 Depth 2
; GFX10-NEXT: s_ashr_i32 s7, s6, 31
-; GFX10-NEXT: s_mov_b32 s4, s8
+; GFX10-NEXT: s_mov_b32 s4, s5
; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2
; GFX10-NEXT: ; implicit-def: $sgpr9
; GFX10-NEXT: v_mov_b32_e32 v6, s10
@@ -330,13 +330,13 @@ define void @nested_loops_temporal_divergence_outer(float %pre.cond.val, i32 %n.
; GFX10-NEXT: s_add_i32 s6, s6, 1
; GFX10-NEXT: v_add_co_u32 v6, s4, v4, v6
; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, v5, v7, s4
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
; GFX10-NEXT: flat_store_byte v[6:7], v0
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_cbranch_execnz .LBB4_1
; GFX10-NEXT: ; %bb.4: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -379,15 +379,15 @@ define void @nested_loops_temporal_divergence_both(float %pre.cond.val, i32 %n.i
; GFX10-LABEL: nested_loops_temporal_divergence_both:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_cmp_lt_f32_e64 s8, 1.0, v0
-; GFX10-NEXT: s_mov_b32 s5, 0
+; GFX10-NEXT: v_cmp_lt_f32_e64 s5, 1.0, v0
; GFX10-NEXT: s_mov_b32 s6, 0
+; GFX10-NEXT: s_mov_b32 s8, 0
; GFX10-NEXT: ; implicit-def: $sgpr9
; GFX10-NEXT: .LBB5_1: ; %OuterHeader
; GFX10-NEXT: ; =>This Loop Header: Depth=1
; GFX10-NEXT: ; Child Loop BB5_2 Depth 2
; GFX10-NEXT: s_ashr_i32 s7, s6, 31
-; GFX10-NEXT: s_mov_b32 s4, s8
+; GFX10-NEXT: s_mov_b32 s4, s5
; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], 2
; GFX10-NEXT: v_mov_b32_e32 v8, s10
; GFX10-NEXT: v_mov_b32_e32 v9, s11
@@ -421,13 +421,13 @@ define void @nested_loops_temporal_divergence_both(float %pre.cond.val, i32 %n.i
; GFX10-NEXT: s_add_i32 s6, s6, 1
; GFX10-NEXT: v_add_co_u32 v8, s4, v4, v8
; GFX10-NEXT: v_add_co_ci_u32_e64 v9, s4, v5, v9, s4
-; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
+; GFX10-NEXT: s_or_b32 s8, vcc_lo, s8
; GFX10-NEXT: flat_store_byte v[8:9], v0
; GFX10-NEXT: s_waitcnt_depctr 0xffe3
-; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: s_cbranch_execnz .LBB5_1
; GFX10-NEXT: ; %bb.4: ; %exit
-; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
+; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s8
; GFX10-NEXT: flat_store_byte v[6:7], v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
index 5240bf4..9aaa9635 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-mui.ll
@@ -547,8 +547,8 @@ define amdgpu_cs void @loop_with_2breaks(ptr addrspace(1) %x, ptr addrspace(1) %
;
; NEW_RBS-LABEL: loop_with_2breaks:
; NEW_RBS: ; %bb.0: ; %entry
-; NEW_RBS-NEXT: s_mov_b32 s4, 0
; NEW_RBS-NEXT: s_mov_b32 s0, 0
+; NEW_RBS-NEXT: s_mov_b32 s4, 0
; NEW_RBS-NEXT: ; implicit-def: $sgpr5
; NEW_RBS-NEXT: s_branch .LBB16_3
; NEW_RBS-NEXT: .LBB16_1: ; %Flow3
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index 54b1554..df77e7d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -40,34 +40,33 @@ define amdgpu_kernel void @udiv_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
; GFX6-LABEL: udiv_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5
+; GFX6-NEXT: s_sub_i32 s2, 0, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0
-; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s3
-; GFX6-NEXT: s_sub_i32 s0, s2, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
+; GFX6-NEXT: v_mul_hi_u32 v0, s4, v0
+; GFX6-NEXT: v_readfirstlane_b32 s6, v0
+; GFX6-NEXT: s_mul_i32 s6, s6, s5
+; GFX6-NEXT: s_sub_i32 s4, s4, s6
+; GFX6-NEXT: s_sub_i32 s6, s4, s5
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s4, s5
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
+; GFX6-NEXT: s_cmp_ge_u32 s4, s5
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_i32:
@@ -138,31 +137,30 @@ define amdgpu_kernel void @urem_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
; GFX6-LABEL: urem_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5
+; GFX6-NEXT: s_sub_i32 s2, 0, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0
-; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s3
-; GFX6-NEXT: s_sub_i32 s0, s2, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: v_mul_hi_u32 v0, s4, v0
+; GFX6-NEXT: v_readfirstlane_b32 s6, v0
+; GFX6-NEXT: s_mul_i32 s6, s6, s5
+; GFX6-NEXT: s_sub_i32 s4, s4, s6
+; GFX6-NEXT: s_sub_i32 s6, s4, s5
+; GFX6-NEXT: s_cmp_ge_u32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
+; GFX6-NEXT: s_sub_i32 s6, s4, s5
+; GFX6-NEXT: s_cmp_ge_u32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: urem_i32:
@@ -242,40 +240,39 @@ define amdgpu_kernel void @sdiv_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
; GFX6-LABEL: sdiv_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_abs_i32 s8, s3
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8
-; GFX6-NEXT: s_sub_i32 s4, 0, s8
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_xor_b32 s1, s2, s3
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_abs_i32 s6, s5
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6
+; GFX6-NEXT: s_sub_i32 s2, 0, s6
+; GFX6-NEXT: s_abs_i32 s7, s4
+; GFX6-NEXT: s_xor_b32 s4, s4, s5
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT: s_ashr_i32 s1, s1, 31
+; GFX6-NEXT: s_ashr_i32 s4, s4, 31
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_abs_i32 s0, s2
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s0, v0
-; GFX6-NEXT: v_readfirstlane_b32 s2, v0
-; GFX6-NEXT: s_mul_i32 s2, s2, s8
-; GFX6-NEXT: s_sub_i32 s0, s0, s2
-; GFX6-NEXT: s_sub_i32 s2, s0, s8
+; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
+; GFX6-NEXT: v_readfirstlane_b32 s5, v0
+; GFX6-NEXT: s_mul_i32 s5, s5, s6
+; GFX6-NEXT: s_sub_i32 s5, s7, s5
+; GFX6-NEXT: s_sub_i32 s7, s5, s6
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s8
+; GFX6-NEXT: s_cmp_ge_u32 s5, s6
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: s_cselect_b32 s0, s2, s0
+; GFX6-NEXT: s_cselect_b32 s5, s7, s5
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s8
+; GFX6-NEXT: s_cmp_ge_u32 s5, s6
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: v_xor_b32_e32 v0, s1, v0
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s1, v0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: v_xor_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_i32:
@@ -360,36 +357,35 @@ define amdgpu_kernel void @srem_i32(ptr addrspace(1) %out, i32 %x, i32 %y) {
; GFX6-LABEL: srem_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_abs_i32 s3, s3
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_abs_i32 s8, s2
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_abs_i32 s5, s5
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5
+; GFX6-NEXT: s_sub_i32 s2, 0, s5
+; GFX6-NEXT: s_abs_i32 s6, s4
+; GFX6-NEXT: s_ashr_i32 s4, s4, 31
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_ashr_i32 s0, s2, 31
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0
-; GFX6-NEXT: v_readfirstlane_b32 s1, v0
-; GFX6-NEXT: s_mul_i32 s1, s1, s3
-; GFX6-NEXT: s_sub_i32 s1, s8, s1
-; GFX6-NEXT: s_sub_i32 s2, s1, s3
-; GFX6-NEXT: s_cmp_ge_u32 s1, s3
-; GFX6-NEXT: s_cselect_b32 s1, s2, s1
-; GFX6-NEXT: s_sub_i32 s2, s1, s3
-; GFX6-NEXT: s_cmp_ge_u32 s1, s3
-; GFX6-NEXT: s_cselect_b32 s1, s2, s1
-; GFX6-NEXT: s_xor_b32 s1, s1, s0
-; GFX6-NEXT: s_sub_i32 s0, s1, s0
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0
+; GFX6-NEXT: v_readfirstlane_b32 s7, v0
+; GFX6-NEXT: s_mul_i32 s7, s7, s5
+; GFX6-NEXT: s_sub_i32 s6, s6, s7
+; GFX6-NEXT: s_sub_i32 s7, s6, s5
+; GFX6-NEXT: s_cmp_ge_u32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_sub_i32 s7, s6, s5
+; GFX6-NEXT: s_cmp_ge_u32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s5, s7, s6
+; GFX6-NEXT: s_xor_b32 s5, s5, s4
+; GFX6-NEXT: s_sub_i32 s4, s5, s4
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_i32:
@@ -5462,15 +5458,14 @@ define amdgpu_kernel void @udiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: udiv_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_add_i32 s0, s3, 12
-; GFX6-NEXT: s_lshr_b32 s0, s2, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_add_i32 s5, s5, 12
+; GFX6-NEXT: s_lshr_b32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_i32_pow2_shl_denom:
@@ -5503,16 +5498,15 @@ define amdgpu_kernel void @udiv_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3
; GFX6-LABEL: udiv_v2i32_pow2k_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_lshr_b32 s0, s2, 12
-; GFX6-NEXT: s_lshr_b32 s1, s3, 12
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshr_b32 s4, s4, 12
+; GFX6-NEXT: s_lshr_b32 s5, s5, 12
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_v2i32_pow2k_denom:
@@ -5546,19 +5540,18 @@ define amdgpu_kernel void @udiv_v2i32_mixed_pow2k_denom(ptr addrspace(1) %out, <
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: v_mov_b32_e32 v0, 0x100101
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_mul_hi_u32 v0, s3, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_lshr_b32 s0, s2, 12
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s3, v0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: v_mul_hi_u32 v0, s5, v0
+; GFX6-NEXT: s_lshr_b32 s4, s4, 12
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s5, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v1, 1, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v1, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v1, 11, v0
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: udiv_v2i32_mixed_pow2k_denom:
@@ -5855,16 +5848,15 @@ define amdgpu_kernel void @urem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: urem_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_lshl_b32 s0, 0x1000, s3
-; GFX6-NEXT: s_add_i32 s0, s0, -1
-; GFX6-NEXT: s_and_b32 s0, s2, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshl_b32 s5, 0x1000, s5
+; GFX6-NEXT: s_add_i32 s5, s5, -1
+; GFX6-NEXT: s_and_b32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: urem_i32_pow2_shl_denom:
@@ -5898,16 +5890,15 @@ define amdgpu_kernel void @urem_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3
; GFX6-LABEL: urem_v2i32_pow2k_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_and_b32 s0, s2, 0xfff
-; GFX6-NEXT: s_and_b32 s1, s3, 0xfff
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_and_b32 s4, s4, 0xfff
+; GFX6-NEXT: s_and_b32 s5, s5, 0xfff
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: urem_v2i32_pow2k_denom:
@@ -6187,41 +6178,40 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: sdiv_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX6-NEXT: s_abs_i32 s8, s3
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s8
-; GFX6-NEXT: s_sub_i32 s4, 0, s8
-; GFX6-NEXT: s_abs_i32 s9, s2
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshl_b32 s5, 0x1000, s5
+; GFX6-NEXT: s_abs_i32 s6, s5
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s6
+; GFX6-NEXT: s_sub_i32 s2, 0, s6
+; GFX6-NEXT: s_abs_i32 s7, s4
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0
-; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s8
-; GFX6-NEXT: s_sub_i32 s0, s9, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s8
+; GFX6-NEXT: v_mul_hi_u32 v0, s7, v0
+; GFX6-NEXT: v_readfirstlane_b32 s8, v0
+; GFX6-NEXT: s_mul_i32 s8, s8, s6
+; GFX6-NEXT: s_sub_i32 s7, s7, s8
+; GFX6-NEXT: s_sub_i32 s8, s7, s6
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s8
+; GFX6-NEXT: s_cmp_ge_u32 s7, s6
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
+; GFX6-NEXT: s_cselect_b32 s7, s8, s7
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; GFX6-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; GFX6-NEXT: s_cmp_ge_u32 s0, s8
+; GFX6-NEXT: s_cmp_ge_u32 s7, s6
; GFX6-NEXT: s_cselect_b64 vcc, -1, 0
-; GFX6-NEXT: s_xor_b32 s0, s2, s3
+; GFX6-NEXT: s_xor_b32 s4, s4, s5
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX6-NEXT: s_ashr_i32 s0, s0, 31
-; GFX6-NEXT: v_xor_b32_e32 v0, s0, v0
-; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_ashr_i32 s4, s4, 31
+; GFX6-NEXT: v_xor_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_subrev_i32_e32 v0, vcc, s4, v0
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_i32_pow2_shl_denom:
@@ -6279,22 +6269,21 @@ define amdgpu_kernel void @sdiv_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3
; GFX6-LABEL: sdiv_v2i32_pow2k_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_ashr_i32 s0, s2, 31
-; GFX6-NEXT: s_ashr_i32 s1, s3, 31
-; GFX6-NEXT: s_lshr_b32 s0, s0, 20
-; GFX6-NEXT: s_lshr_b32 s1, s1, 20
-; GFX6-NEXT: s_add_i32 s0, s2, s0
-; GFX6-NEXT: s_add_i32 s1, s3, s1
-; GFX6-NEXT: s_ashr_i32 s0, s0, 12
-; GFX6-NEXT: s_ashr_i32 s1, s1, 12
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_ashr_i32 s6, s4, 31
+; GFX6-NEXT: s_ashr_i32 s7, s5, 31
+; GFX6-NEXT: s_lshr_b32 s6, s6, 20
+; GFX6-NEXT: s_lshr_b32 s7, s7, 20
+; GFX6-NEXT: s_add_i32 s4, s4, s6
+; GFX6-NEXT: s_add_i32 s5, s5, s7
+; GFX6-NEXT: s_ashr_i32 s4, s4, 12
+; GFX6-NEXT: s_ashr_i32 s5, s5, 12
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: sdiv_v2i32_pow2k_denom:
@@ -6334,22 +6323,21 @@ define amdgpu_kernel void @ssdiv_v2i32_mixed_pow2k_denom(ptr addrspace(1) %out,
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX6-NEXT: v_mov_b32_e32 v0, 0x80080081
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_mul_hi_i32 v0, s3, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_ashr_i32 s0, s2, 31
-; GFX6-NEXT: s_lshr_b32 s0, s0, 20
-; GFX6-NEXT: s_add_i32 s0, s2, s0
-; GFX6-NEXT: v_add_i32_e32 v0, vcc, s3, v0
-; GFX6-NEXT: s_ashr_i32 s0, s0, 12
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: v_mul_hi_i32 v0, s5, v0
+; GFX6-NEXT: s_ashr_i32 s6, s4, 31
+; GFX6-NEXT: s_lshr_b32 s6, s6, 20
+; GFX6-NEXT: s_add_i32 s4, s4, s6
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s5, v0
+; GFX6-NEXT: s_ashr_i32 s4, s4, 12
; GFX6-NEXT: v_lshrrev_b32_e32 v1, 31, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 11, v0
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_add_i32_e32 v1, vcc, v0, v1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: ssdiv_v2i32_mixed_pow2k_denom:
@@ -6700,37 +6688,36 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(ptr addrspace(1) %out, i32 %x
; GFX6-LABEL: srem_i32_pow2_shl_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_lshl_b32 s3, 0x1000, s3
-; GFX6-NEXT: s_abs_i32 s3, s3
-; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s3
-; GFX6-NEXT: s_sub_i32 s4, 0, s3
-; GFX6-NEXT: s_abs_i32 s8, s2
-; GFX6-NEXT: s_mov_b32 s5, s1
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshl_b32 s2, 0x1000, s5
+; GFX6-NEXT: s_abs_i32 s5, s2
+; GFX6-NEXT: v_cvt_f32_u32_e32 v0, s5
+; GFX6-NEXT: s_sub_i32 s2, 0, s5
+; GFX6-NEXT: s_abs_i32 s6, s4
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT: v_mul_lo_u32 v1, s4, v0
-; GFX6-NEXT: s_mov_b32 s4, s0
+; GFX6-NEXT: v_mul_lo_u32 v1, s2, v0
+; GFX6-NEXT: s_mov_b32 s2, -1
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0
-; GFX6-NEXT: v_readfirstlane_b32 s0, v0
-; GFX6-NEXT: s_mul_i32 s0, s0, s3
-; GFX6-NEXT: s_sub_i32 s0, s8, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
-; GFX6-NEXT: s_sub_i32 s1, s0, s3
-; GFX6-NEXT: s_cmp_ge_u32 s0, s3
-; GFX6-NEXT: s_cselect_b32 s0, s1, s0
-; GFX6-NEXT: s_ashr_i32 s1, s2, 31
-; GFX6-NEXT: s_xor_b32 s0, s0, s1
-; GFX6-NEXT: s_sub_i32 s0, s0, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0
+; GFX6-NEXT: v_readfirstlane_b32 s7, v0
+; GFX6-NEXT: s_mul_i32 s7, s7, s5
+; GFX6-NEXT: s_sub_i32 s6, s6, s7
+; GFX6-NEXT: s_sub_i32 s7, s6, s5
+; GFX6-NEXT: s_cmp_ge_u32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_sub_i32 s7, s6, s5
+; GFX6-NEXT: s_cmp_ge_u32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s5, s7, s6
+; GFX6-NEXT: s_ashr_i32 s4, s4, 31
+; GFX6-NEXT: s_xor_b32 s5, s5, s4
+; GFX6-NEXT: s_sub_i32 s4, s5, s4
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_i32_pow2_shl_denom:
@@ -6785,24 +6772,23 @@ define amdgpu_kernel void @srem_v2i32_pow2k_denom(ptr addrspace(1) %out, <2 x i3
; GFX6-LABEL: srem_v2i32_pow2k_denom:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_ashr_i32 s0, s2, 31
-; GFX6-NEXT: s_ashr_i32 s1, s3, 31
-; GFX6-NEXT: s_lshr_b32 s0, s0, 20
-; GFX6-NEXT: s_lshr_b32 s1, s1, 20
-; GFX6-NEXT: s_add_i32 s0, s2, s0
-; GFX6-NEXT: s_add_i32 s1, s3, s1
-; GFX6-NEXT: s_and_b32 s0, s0, 0xfffff000
-; GFX6-NEXT: s_and_b32 s1, s1, 0xfffff000
-; GFX6-NEXT: s_sub_i32 s0, s2, s0
-; GFX6-NEXT: s_sub_i32 s1, s3, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: v_mov_b32_e32 v1, s1
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_ashr_i32 s6, s4, 31
+; GFX6-NEXT: s_lshr_b32 s6, s6, 20
+; GFX6-NEXT: s_ashr_i32 s7, s5, 31
+; GFX6-NEXT: s_add_i32 s6, s4, s6
+; GFX6-NEXT: s_lshr_b32 s7, s7, 20
+; GFX6-NEXT: s_and_b32 s6, s6, 0xfffff000
+; GFX6-NEXT: s_sub_i32 s4, s4, s6
+; GFX6-NEXT: s_add_i32 s6, s5, s7
+; GFX6-NEXT: s_and_b32 s6, s6, 0xfffff000
+; GFX6-NEXT: s_sub_i32 s5, s5, s6
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX9-LABEL: srem_v2i32_pow2k_denom:
diff --git a/llvm/test/CodeGen/AMDGPU/and.ll b/llvm/test/CodeGen/AMDGPU/and.ll
index 29bfc25..fe9ec8e 100644
--- a/llvm/test/CodeGen/AMDGPU/and.ll
+++ b/llvm/test/CodeGen/AMDGPU/and.ll
@@ -123,27 +123,25 @@ define amdgpu_kernel void @s_and_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
; GFX6-LABEL: s_and_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_and_b32 s0, s2, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_and_b32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_and_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_and_b32 s0, s2, s3
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_and_b32 s4, s4, s5
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
%and = and i32 %a, %b
store i32 %and, ptr addrspace(1) %out, align 4
@@ -189,36 +187,34 @@ define amdgpu_kernel void @s_and_multi_use_constant_i32_0(ptr addrspace(1) %out,
; GFX6-LABEL: s_and_multi_use_constant_i32_0:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
-; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_and_b32 s0, s2, 0x12d687
-; GFX6-NEXT: s_add_i32 s0, s0, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
-; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v0, 0x12d687
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_waitcnt lgkmcnt(0)
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_and_b32 s4, s4, 0x12d687
+; GFX6-NEXT: s_add_i32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v1, s4
+; GFX6-NEXT: buffer_store_dword v1, off, s[0:3], 0
+; GFX6-NEXT: s_waitcnt vmcnt(0)
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_and_multi_use_constant_i32_0:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, 0x12d687
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_and_b32 s0, s2, 0x12d687
-; GFX8-NEXT: s_add_i32 s0, s0, s3
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_and_b32 s4, s4, 0x12d687
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v1, s4
+; GFX8-NEXT: buffer_store_dword v1, off, s[0:3], 0
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_mov_b32_e32 v0, 0x12d687
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_endpgm
%and = and i32 %a, 1234567
@@ -236,32 +232,30 @@ define amdgpu_kernel void @s_and_multi_use_constant_i32_1(ptr addrspace(1) %out,
; GFX6-LABEL: s_and_multi_use_constant_i32_1:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_and_b32 s0, s2, 0x12d687
-; GFX6-NEXT: s_add_i32 s0, s0, s3
-; GFX6-NEXT: s_add_i32 s0, s0, 0x12d687
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_and_b32 s4, s4, 0x12d687
+; GFX6-NEXT: s_add_i32 s4, s4, s5
+; GFX6-NEXT: s_add_i32 s4, s4, 0x12d687
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_and_multi_use_constant_i32_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_and_b32 s0, s2, 0x12d687
-; GFX8-NEXT: s_add_i32 s0, s0, s3
-; GFX8-NEXT: s_add_i32 s0, s0, 0x12d687
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_and_b32 s4, s4, 0x12d687
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s4, s4, 0x12d687
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: s_endpgm
%and = and i32 %a, 1234567
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
index 30a7864..39618b0 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
@@ -368,7 +368,10 @@ define amdgpu_ps float @test_clamp_v2bf16_s(<2 x bfloat> inreg %src) {
define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) {
; GCN-LABEL: test_clamp_bf16_folding:
; GCN: ; %bb.0:
-; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp
+; GCN-NEXT: v_exp_bf16_e32 v0, v0
+; GCN-NEXT: v_nop
+; GCN-NEXT: s_delay_alu instid0(TRANS32_DEP_1)
+; GCN-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
; GCN-NEXT: ; return to shader part epilog
%exp = call bfloat @llvm.exp2.bf16(bfloat %src)
%max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0)
diff --git a/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll b/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
index c14678c..c0d5f8a 100644
--- a/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfe-patterns.ll
@@ -120,17 +120,17 @@ define amdgpu_kernel void @s_ubfe_sub_i32(ptr addrspace(1) %out, i32 %src, i32 %
; SI-LABEL: s_ubfe_sub_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_i32 s3, 32, s3
-; SI-NEXT: s_lshl_b32 s2, s2, s3
-; SI-NEXT: s_lshr_b32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s2
-; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_sub_i32 s2, 32, s5
+; SI-NEXT: s_lshl_b32 s4, s4, s2
+; SI-NEXT: s_lshr_b32 s4, s4, s2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: v_mov_b32_e32 v2, s4
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_ubfe_sub_i32:
@@ -160,20 +160,20 @@ define amdgpu_kernel void @s_ubfe_sub_multi_use_shl_i32(ptr addrspace(1) %out, i
; SI-LABEL: s_ubfe_sub_multi_use_shl_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s6, 0
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_i32 s3, 32, s3
-; SI-NEXT: s_lshl_b32 s2, s2, s3
-; SI-NEXT: s_lshr_b32 s3, s2, s3
; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s3
-; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_sub_i32 s3, 32, s5
+; SI-NEXT: s_lshl_b32 s4, s4, s3
+; SI-NEXT: s_lshr_b32 s5, s4, s3
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_mov_b32_e32 v2, s5
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
@@ -322,17 +322,17 @@ define amdgpu_kernel void @s_sbfe_sub_i32(ptr addrspace(1) %out, i32 %src, i32 %
; SI-LABEL: s_sbfe_sub_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_i32 s3, 32, s3
-; SI-NEXT: s_lshl_b32 s2, s2, s3
-; SI-NEXT: s_ashr_i32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s2
-; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_sub_i32 s2, 32, s5
+; SI-NEXT: s_lshl_b32 s4, s4, s2
+; SI-NEXT: s_ashr_i32 s4, s4, s2
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: v_mov_b32_e32 v2, s4
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_sbfe_sub_i32:
@@ -362,20 +362,20 @@ define amdgpu_kernel void @s_sbfe_sub_multi_use_shl_i32(ptr addrspace(1) %out, i
; SI-LABEL: s_sbfe_sub_multi_use_shl_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s6, 0
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_i32 s3, 32, s3
-; SI-NEXT: s_lshl_b32 s2, s2, s3
-; SI-NEXT: s_ashr_i32 s3, s2, s3
; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b64 s[4:5], s[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s3
-; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_sub_i32 s3, 32, s5
+; SI-NEXT: s_lshl_b32 s4, s4, s3
+; SI-NEXT: s_ashr_i32 s5, s4, s3
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_mov_b32_e32 v2, s5
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/bfi_nested.ll b/llvm/test/CodeGen/AMDGPU/bfi_nested.ll
index bd76f34..7326adae 100644
--- a/llvm/test/CodeGen/AMDGPU/bfi_nested.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfi_nested.ll
@@ -284,16 +284,15 @@ define amdgpu_kernel void @v_bfi_dont_applied_for_scalar_ops(ptr addrspace(1) %o
; GCN-LABEL: v_bfi_dont_applied_for_scalar_ops:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_and_b32 s3, s3, 0xffff0000
-; GCN-NEXT: s_and_b32 s2, s2, 0xffff
-; GCN-NEXT: s_or_b32 s2, s2, s3
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_and_b32 s2, s5, 0xffff0000
+; GCN-NEXT: s_and_b32 s4, s4, 0xffff
+; GCN-NEXT: s_or_b32 s4, s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%shift = lshr i32 %b, 16
%tr = trunc i32 %shift to i16
diff --git a/llvm/test/CodeGen/AMDGPU/bfm.ll b/llvm/test/CodeGen/AMDGPU/bfm.ll
index a12b5ea..172e07f 100644
--- a/llvm/test/CodeGen/AMDGPU/bfm.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfm.ll
@@ -6,14 +6,13 @@ define amdgpu_kernel void @s_bfm_pattern(ptr addrspace(1) %out, i32 %x, i32 %y)
; SI-LABEL: s_bfm_pattern:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_bfm_b32 s2, s2, s3
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_bfm_b32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_bfm_pattern:
diff --git a/llvm/test/CodeGen/AMDGPU/bitreverse.ll b/llvm/test/CodeGen/AMDGPU/bitreverse.ll
index d4f5617..e33b9ab 100644
--- a/llvm/test/CodeGen/AMDGPU/bitreverse.ll
+++ b/llvm/test/CodeGen/AMDGPU/bitreverse.ll
@@ -362,31 +362,29 @@ define amdgpu_kernel void @s_brev_v2i32(ptr addrspace(1) noalias %out, <2 x i32>
; SI-LABEL: s_brev_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_brev_b32 s0, s3
-; SI-NEXT: s_brev_b32 s1, s2
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_brev_b32 s5, s5
+; SI-NEXT: s_brev_b32 s4, s4
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; FLAT-LABEL: s_brev_v2i32:
; FLAT: ; %bb.0:
; FLAT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; FLAT-NEXT: s_mov_b32 s7, 0xf000
-; FLAT-NEXT: s_mov_b32 s6, -1
; FLAT-NEXT: s_waitcnt lgkmcnt(0)
-; FLAT-NEXT: s_mov_b32 s4, s0
-; FLAT-NEXT: s_mov_b32 s5, s1
-; FLAT-NEXT: s_brev_b32 s0, s3
-; FLAT-NEXT: s_brev_b32 s1, s2
-; FLAT-NEXT: v_mov_b32_e32 v0, s1
-; FLAT-NEXT: v_mov_b32_e32 v1, s0
-; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; FLAT-NEXT: s_mov_b64 s[4:5], s[2:3]
+; FLAT-NEXT: s_brev_b32 s5, s5
+; FLAT-NEXT: s_brev_b32 s4, s4
+; FLAT-NEXT: s_mov_b32 s3, 0xf000
+; FLAT-NEXT: s_mov_b32 s2, -1
+; FLAT-NEXT: v_mov_b32_e32 v0, s4
+; FLAT-NEXT: v_mov_b32_e32 v1, s5
+; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; FLAT-NEXT: s_endpgm
;
; GISEL-LABEL: s_brev_v2i32:
@@ -405,16 +403,14 @@ define amdgpu_kernel void @s_brev_v2i32(ptr addrspace(1) noalias %out, <2 x i32>
; GFX11-FLAT-LABEL: s_brev_v2i32:
; GFX11-FLAT: ; %bb.0:
; GFX11-FLAT-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-FLAT-NEXT: s_mov_b32 s7, 0x31016000
-; GFX11-FLAT-NEXT: s_mov_b32 s6, -1
; GFX11-FLAT-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FLAT-NEXT: s_brev_b32 s2, s2
; GFX11-FLAT-NEXT: s_brev_b32 s3, s3
; GFX11-FLAT-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FLAT-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
-; GFX11-FLAT-NEXT: s_mov_b32 s4, s0
-; GFX11-FLAT-NEXT: s_mov_b32 s5, s1
-; GFX11-FLAT-NEXT: buffer_store_b64 v[0:1], off, s[4:7], 0
+; GFX11-FLAT-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-FLAT-NEXT: s_mov_b32 s2, -1
+; GFX11-FLAT-NEXT: buffer_store_b64 v[0:1], off, s[0:3], 0
; GFX11-FLAT-NEXT: s_endpgm
;
; GFX11-GISEL-LABEL: s_brev_v2i32:
diff --git a/llvm/test/CodeGen/AMDGPU/build_vector.ll b/llvm/test/CodeGen/AMDGPU/build_vector.ll
index 763f436..fbaaef0 100644
--- a/llvm/test/CodeGen/AMDGPU/build_vector.ll
+++ b/llvm/test/CodeGen/AMDGPU/build_vector.ll
@@ -255,16 +255,15 @@ define amdgpu_kernel void @build_v2i32_from_v4i16_shuffle(ptr addrspace(1) %out,
; GFX6-LABEL: build_v2i32_from_v4i16_shuffle:
; GFX6: ; %bb.0: ; %entry
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: s_lshl_b32 s0, s3, 16
-; GFX6-NEXT: s_lshl_b32 s1, s2, 16
-; GFX6-NEXT: v_mov_b32_e32 v0, s1
-; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: build_v2i32_from_v4i16_shuffle:
diff --git a/llvm/test/CodeGen/AMDGPU/cc-entry.ll b/llvm/test/CodeGen/AMDGPU/cc-entry.ll
new file mode 100644
index 0000000..d807f32
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/cc-entry.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s
+
+define amdgpu_kernel void @entry_fn() {
+; CHECK-LABEL: entry_fn:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_getpc_b64 s[4:5]
+; CHECK-NEXT: s_sext_i32_i16 s5, s5
+; CHECK-NEXT: s_add_co_u32 s4, s4, entry_fn@gotpcrel32@lo+8
+; CHECK-NEXT: s_add_co_ci_u32 s5, s5, entry_fn@gotpcrel32@hi+16
+; CHECK-NEXT: s_mov_b32 s32, 0
+; CHECK-NEXT: s_load_b64 s[4:5], s[4:5], 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT: s_endpgm
+entry:
+ call void @entry_fn()
+ ret void
+}
+
+define void @caller() {
+; CHECK-LABEL: caller:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: s_wait_expcnt 0x0
+; CHECK-NEXT: s_wait_samplecnt 0x0
+; CHECK-NEXT: s_wait_bvhcnt 0x0
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_mov_b32 s0, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_or_saveexec_b32 s1, -1
+; CHECK-NEXT: scratch_store_b32 off, v40, s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_mov_b32 exec_lo, s1
+; CHECK-NEXT: s_add_co_i32 s32, s32, 16
+; CHECK-NEXT: v_writelane_b32 v40, s0, 2
+; CHECK-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CHECK-NEXT: s_getpc_b64 s[4:5]
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_sext_i32_i16 s5, s5
+; CHECK-NEXT: s_add_co_u32 s4, s4, entry_fn@gotpcrel32@lo+12
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_add_co_ci_u32 s5, s5, entry_fn@gotpcrel32@hi+24
+; CHECK-NEXT: v_mov_b32_e32 v0, v31
+; CHECK-NEXT: s_load_b64 s[4:5], s[4:5], 0x0
+; CHECK-NEXT: v_writelane_b32 v40, s30, 0
+; CHECK-NEXT: s_mov_b64 s[2:3], s[6:7]
+; CHECK-NEXT: s_mov_b64 s[6:7], s[10:11]
+; CHECK-NEXT: v_writelane_b32 v40, s31, 1
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s31, v40, 1
+; CHECK-NEXT: v_readlane_b32 s30, v40, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: v_readlane_b32 s0, v40, 2
+; CHECK-NEXT: s_or_saveexec_b32 s1, -1
+; CHECK-NEXT: scratch_load_b32 v40, off, s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_mov_b32 exec_lo, s1
+; CHECK-NEXT: s_mov_b32 s33, s0
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+entry:
+ call void @entry_fn()
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
index 3d315f8..4cbd41c 100644
--- a/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine-cond-add-sub.ll
@@ -647,20 +647,20 @@ define amdgpu_kernel void @sub_zext_setcc_commute(ptr addrspace(1) nocapture %ar
; GCN-LABEL: sub_zext_setcc_commute:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, 0
; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b64 s[4:5], s[0:1]
-; GCN-NEXT: buffer_load_dword v4, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, v0, v1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s2, v0
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
-; GCN-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s5, v0
+; GCN-NEXT: buffer_store_dword v0, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: s_endpgm
;
; GFX9-LABEL: sub_zext_setcc_commute:
@@ -696,20 +696,20 @@ define amdgpu_kernel void @sub_sext_setcc_commute(ptr addrspace(1) nocapture %ar
; GCN-LABEL: sub_sext_setcc_commute:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, 0
; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b64 s[4:5], s[0:1]
-; GCN-NEXT: buffer_load_dword v4, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, v0, v1
; GCN-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s2, v0
-; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s3, v0
-; GCN-NEXT: buffer_store_dword v0, v[2:3], s[4:7], 0 addr64
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT: v_subrev_i32_e32 v0, vcc, s5, v0
+; GCN-NEXT: buffer_store_dword v0, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: s_endpgm
;
; GFX9-LABEL: sub_sext_setcc_commute:
diff --git a/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll b/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll
new file mode 100644
index 0000000..244c3f7c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/compute-known-bits-nofpclass.ll
@@ -0,0 +1,46 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+define i32 @known_positive(float nofpclass(nan ninf nzero nsub nnorm) %signbit.zero) #0 {
+; CHECK-LABEL: known_positive:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %cast = bitcast float %signbit.zero to i32
+ %and = and i32 %cast, 2147483647
+ ret i32 %and
+}
+
+define i32 @known_positive_maybe_nan(float nofpclass(ninf nzero nsub nnorm) %signbit.zero) #0 {
+; CHECK-LABEL: known_positive_maybe_nan:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %cast = bitcast float %signbit.zero to i32
+ %and = and i32 %cast, 2147483647
+ ret i32 %and
+}
+
+define i32 @known_negative(float nofpclass(nan pinf pzero psub pnorm) %signbit.one) #0 {
+; CHECK-LABEL: known_negative:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %cast = bitcast float %signbit.one to i32
+ %or = or i32 %cast, -2147483648
+ ret i32 %or
+}
+
+define i32 @known_negative_maybe_nan(float nofpclass(pinf pzero psub pnorm) %signbit.one) #0 {
+; CHECK-LABEL: known_negative_maybe_nan:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_or_b32_e32 v0, 0x80000000, v0
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %cast = bitcast float %signbit.one to i32
+ %or = or i32 %cast, -2147483648
+ ret i32 %or
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
index f5227ee..ef676dd 100644
--- a/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
+++ b/llvm/test/CodeGen/AMDGPU/copysign-simplify-demanded-bits.ll
@@ -345,15 +345,13 @@ define float @test_copysign_pow_fast_f32__integral_y(float %x, i32 %y.i) {
; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v3
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v4, vcc
; GFX9-NEXT: v_fma_f32 v2, v2, v1, v3
-; GFX9-NEXT: v_cvt_i32_f32_e32 v1, v1
; GFX9-NEXT: v_exp_f32_e32 v2, v2
+; GFX9-NEXT: v_cvt_i32_f32_e32 v1, v1
; GFX9-NEXT: v_not_b32_e32 v3, 63
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
; GFX9-NEXT: v_ldexp_f32 v2, v2, v3
-; GFX9-NEXT: v_and_b32_e32 v0, v1, v0
-; GFX9-NEXT: s_brev_b32 s4, -2
-; GFX9-NEXT: v_bfi_b32 v0, s4, v2, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: v_and_or_b32 v0, v1, v0, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%y = sitofp i32 %y.i to float
%y.fptosi = fptosi float %y to i32
@@ -370,4 +368,109 @@ define float @test_copysign_pow_fast_f32__integral_y(float %x, i32 %y.i) {
ret float %pow_sign1
}
+define double @test_pow_fast_f64integral_y(double %x, i32 %y.i) #0 {
+; GFX9-LABEL: test_pow_fast_f64integral_y:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s16, s33
+; GFX9-NEXT: s_mov_b32 s33, s32
+; GFX9-NEXT: s_or_saveexec_b64 s[18:19], -1
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
+; GFX9-NEXT: s_mov_b64 exec, s[18:19]
+; GFX9-NEXT: v_writelane_b32 v43, s16, 14
+; GFX9-NEXT: v_writelane_b32 v43, s30, 0
+; GFX9-NEXT: v_writelane_b32 v43, s31, 1
+; GFX9-NEXT: v_writelane_b32 v43, s34, 2
+; GFX9-NEXT: v_writelane_b32 v43, s35, 3
+; GFX9-NEXT: v_writelane_b32 v43, s36, 4
+; GFX9-NEXT: v_writelane_b32 v43, s37, 5
+; GFX9-NEXT: v_writelane_b32 v43, s38, 6
+; GFX9-NEXT: v_writelane_b32 v43, s39, 7
+; GFX9-NEXT: v_writelane_b32 v43, s48, 8
+; GFX9-NEXT: v_writelane_b32 v43, s49, 9
+; GFX9-NEXT: v_writelane_b32 v43, s50, 10
+; GFX9-NEXT: s_addk_i32 s32, 0x800
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s33 ; 4-byte Folded Spill
+; GFX9-NEXT: v_writelane_b32 v43, s51, 11
+; GFX9-NEXT: v_mov_b32_e32 v42, v1
+; GFX9-NEXT: v_writelane_b32 v43, s52, 12
+; GFX9-NEXT: v_and_b32_e32 v1, 0x7fffffff, v42
+; GFX9-NEXT: s_getpc_b64 s[16:17]
+; GFX9-NEXT: s_add_u32 s16, s16, _Z4log2d@rel32@lo+4
+; GFX9-NEXT: s_addc_u32 s17, s17, _Z4log2d@rel32@hi+12
+; GFX9-NEXT: v_writelane_b32 v43, s53, 13
+; GFX9-NEXT: v_mov_b32_e32 v40, v31
+; GFX9-NEXT: v_mov_b32_e32 v41, v2
+; GFX9-NEXT: s_mov_b32 s50, s15
+; GFX9-NEXT: s_mov_b32 s51, s14
+; GFX9-NEXT: s_mov_b32 s52, s13
+; GFX9-NEXT: s_mov_b32 s53, s12
+; GFX9-NEXT: s_mov_b64 s[34:35], s[10:11]
+; GFX9-NEXT: s_mov_b64 s[36:37], s[8:9]
+; GFX9-NEXT: s_mov_b64 s[38:39], s[6:7]
+; GFX9-NEXT: s_mov_b64 s[48:49], s[4:5]
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: v_cvt_f64_i32_e32 v[2:3], v41
+; GFX9-NEXT: s_getpc_b64 s[16:17]
+; GFX9-NEXT: s_add_u32 s16, s16, _Z4exp2d@rel32@lo+4
+; GFX9-NEXT: s_addc_u32 s17, s17, _Z4exp2d@rel32@hi+12
+; GFX9-NEXT: s_mov_b64 s[4:5], s[48:49]
+; GFX9-NEXT: s_mov_b64 s[6:7], s[38:39]
+; GFX9-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3]
+; GFX9-NEXT: s_mov_b64 s[8:9], s[36:37]
+; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
+; GFX9-NEXT: s_mov_b32 s12, s53
+; GFX9-NEXT: s_mov_b32 s13, s52
+; GFX9-NEXT: s_mov_b32 s14, s51
+; GFX9-NEXT: s_mov_b32 s15, s50
+; GFX9-NEXT: v_mov_b32_e32 v31, v40
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v41
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v42
+; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s33 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX9-NEXT: v_readlane_b32 s53, v43, 13
+; GFX9-NEXT: v_readlane_b32 s52, v43, 12
+; GFX9-NEXT: v_readlane_b32 s51, v43, 11
+; GFX9-NEXT: v_readlane_b32 s50, v43, 10
+; GFX9-NEXT: v_readlane_b32 s49, v43, 9
+; GFX9-NEXT: v_readlane_b32 s48, v43, 8
+; GFX9-NEXT: v_readlane_b32 s39, v43, 7
+; GFX9-NEXT: v_readlane_b32 s38, v43, 6
+; GFX9-NEXT: v_readlane_b32 s37, v43, 5
+; GFX9-NEXT: v_readlane_b32 s36, v43, 4
+; GFX9-NEXT: v_readlane_b32 s35, v43, 3
+; GFX9-NEXT: v_readlane_b32 s34, v43, 2
+; GFX9-NEXT: v_readlane_b32 s31, v43, 1
+; GFX9-NEXT: v_readlane_b32 s30, v43, 0
+; GFX9-NEXT: s_mov_b32 s32, s33
+; GFX9-NEXT: v_readlane_b32 s4, v43, 14
+; GFX9-NEXT: s_or_saveexec_b64 s[6:7], -1
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
+; GFX9-NEXT: s_mov_b64 exec, s[6:7]
+; GFX9-NEXT: s_mov_b32 s33, s4
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %fabs = call fast double @llvm.fabs.f64(double %x)
+ %log2 = call fast double @_Z4log2d(double %fabs)
+ %pownI2F = sitofp i32 %y.i to double
+ %ylogx = fmul fast double %log2, %pownI2F
+ %exp2 = call fast nofpclass(nan ninf nzero nsub nnorm) double @_Z4exp2d(double %ylogx)
+ %ytou = zext i32 %y.i to i64
+ %yeven = shl i64 %ytou, 63
+ %x.i64 = bitcast double %x to i64
+ %pow_sign = and i64 %yeven, %x.i64
+ %pow_sign.f64 = bitcast i64 %pow_sign to double
+ %pow_sign1 = call fast double @llvm.copysign.f64(double %exp2, double %pow_sign.f64)
+ ret double %pow_sign1
+}
+
+declare hidden double @_Z4exp2d(double) #1
+declare hidden double @_Z4log2d(double) #1
+
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+attributes #1 = { norecurse nounwind memory(read) }
diff --git a/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll b/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll
new file mode 100644
index 0000000..afd610f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/copysign-to-disjoint-or-combine.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+
+; Negative test: %x is not known to be positive.
+define half @copysign_known_signmask_f16(half %x, i16 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 15, v1
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i16 %sign, 15
+ %signmask.bitcast = bitcast i16 %signmask to half
+ %result = call half @llvm.copysign.f16(half %x, half %signmask.bitcast)
+ ret half %result
+}
+
+; Negative test: %x is not known to be positive.
+define float @copysign_known_signmask_f32(float %x, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %x, float %signmask.bitcast)
+ ret float %result
+}
+
+; Negative test: %x is not known to be positive.
+define double @copysign_known_signmask_f64(double %x, i64 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v2
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v1, s4, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i64 %sign, 63
+ %signmask.bitcast = bitcast i64 %signmask to double
+ %result = call double @llvm.copysign.f64(double %x, double %signmask.bitcast)
+ ret double %result
+}
+
+; Negative test: the magnitude may be a nan, so its sign bit is not known to be zero.
+define float @copysign_known_signmask_f32_known_not_known_positive_mag_maybe_nan(float nofpclass(ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_not_known_positive_mag_maybe_nan:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+; Negative test: the magnitude may be -0.0, so its sign bit is not known to be zero.
+define float @copysign_known_signmask_f32_known_not_known_positive_mag_maybe_negzero(float nofpclass(nan ninf nsub nnorm) %sign.bit.known.zero, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_not_known_positive_mag_maybe_negzero:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define half @copysign_known_signmask_f16_known_positive_mag(half nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i16 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f16_known_positive_mag:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 15, v1
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i16 %sign, 15
+ %signmask.bitcast = bitcast i16 %signmask to half
+ %result = call half @llvm.copysign.f16(half %sign.bit.known.zero, half %signmask.bitcast)
+ ret half %result
+}
+
+define float @copysign_known_signmask_f32_known_positive_mag(float nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %sign.bit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define double @copysign_known_signmask_f64_known_positive_mag(double nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i64 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f64_known_positive_mag:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 31, v2
+; GFX9-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i64 %sign, 63
+ %signmask.bitcast = bitcast i64 %signmask to double
+ %result = call double @llvm.copysign.f64(double %sign.bit.known.zero, double %signmask.bitcast)
+ ret double %result
+}
+
+; exp always returns a positive result, apart from the unknown sign bit of a
+; nan.
+define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp(float %x, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xc2aeac50
+; GFX9-NEXT: v_add_f32_e32 v2, 0x42800000, v0
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_mul_f32_e32 v2, 0x114b4ea4, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signbit.known.zero = call nnan afn float @llvm.exp.f32(float %x)
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp2(float %x, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xc2fc0000
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x42800000
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_not_b32_e32 v2, 63
+; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
+; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signbit.known.zero = call nnan afn float @llvm.exp2.f32(float %x)
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define float @copysign_known_signmask_f32_known_positive_mag__nnan_exp10(float %x, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag__nnan_exp10:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xc2fc0000
+; GFX9-NEXT: v_cmp_gt_f32_e32 vcc, s4, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x42800000
+; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc
+; GFX9-NEXT: v_add_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_not_b32_e32 v2, 63
+; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
+; GFX9-NEXT: v_ldexp_f32 v0, v0, v2
+; GFX9-NEXT: v_lshl_or_b32 v0, v1, 31, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signbit.known.zero = call nnan afn float @llvm.exp2.f32(float %x)
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %result = call float @llvm.copysign.f32(float %signbit.known.zero, float %signmask.bitcast)
+ ret float %result
+}
+
+define float @copysign_known_signmask_f32_known_positive_mag_through_fence(float nofpclass(nan ninf nzero nsub nnorm) %sign.bit.known.zero, i32 %sign) {
+; GFX9-LABEL: copysign_known_signmask_f32_known_positive_mag_through_fence:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 31, v1
+; GFX9-NEXT: ;ARITH_FENCE
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %signmask = shl i32 %sign, 31
+ %signmask.bitcast = bitcast i32 %signmask to float
+ %fence = call float @llvm.arithmetic.fence.f32(float %sign.bit.known.zero)
+ %result = call float @llvm.copysign.f32(float %fence, float %signmask.bitcast)
+ ret float %result
+}
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
index ab96dcf..08545b9 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-buildvector.ll
@@ -390,16 +390,15 @@ define amdgpu_kernel void @uniform_vec_i16_LH(ptr addrspace(1) %out, i16 %a, i32
; GCN-LABEL: uniform_vec_i16_LH:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_and_b32 s3, s3, 0xffff0000
-; GCN-NEXT: s_and_b32 s2, s2, 0xffff
-; GCN-NEXT: s_or_b32 s2, s2, s3
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_and_b32 s2, s5, 0xffff0000
+; GCN-NEXT: s_and_b32 s4, s4, 0xffff
+; GCN-NEXT: s_or_b32 s4, s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
;
; GFX9-LABEL: uniform_vec_i16_LH:
diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll
index 4c3fd40..d8f9bc1 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-sext-inreg.ll
@@ -5,15 +5,14 @@ define amdgpu_kernel void @uniform_sext_in_reg_i8_to_i32(ptr addrspace(1) %out,
; GCN-LABEL: uniform_sext_in_reg_i8_to_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_add_i32 s2, s2, s3
-; GCN-NEXT: s_sext_i32_i8 s2, s2
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_add_i32 s2, s4, s5
+; GCN-NEXT: s_sext_i32_i8 s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%c = add i32 %a, %b ; add to prevent folding into extload
%shl = shl i32 %c, 24
@@ -26,15 +25,14 @@ define amdgpu_kernel void @divergent_sext_in_reg_i8_to_i32(ptr addrspace(1) %out
; GCN-LABEL: divergent_sext_in_reg_i8_to_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: s_add_i32 s0, s2, s3
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: s_add_i32 s4, s4, s5
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 8
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%c = add i32 %a, %b ; add to prevent folding into extload
@@ -49,15 +47,14 @@ define amdgpu_kernel void @uniform_sext_in_reg_i16_to_i32(ptr addrspace(1) %out,
; GCN-LABEL: uniform_sext_in_reg_i16_to_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_add_i32 s2, s2, s3
-; GCN-NEXT: s_sext_i32_i16 s2, s2
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_add_i32 s2, s4, s5
+; GCN-NEXT: s_sext_i32_i16 s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%c = add i32 %a, %b ; add to prevent folding into extload
%shl = shl i32 %c, 16
@@ -70,15 +67,14 @@ define amdgpu_kernel void @divergent_sext_in_reg_i16_to_i32(ptr addrspace(1) %ou
; GCN-LABEL: divergent_sext_in_reg_i16_to_i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: s_add_i32 s0, s2, s3
-; GCN-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: s_add_i32 s4, s4, s5
+; GCN-NEXT: v_add_i32_e32 v0, vcc, s4, v0
; GCN-NEXT: v_bfe_i32 v0, v0, 0, 16
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%c = add i32 %a, %b ; add to prevent folding into extload
diff --git a/llvm/test/CodeGen/AMDGPU/ds-read2-write2-debug-info.ll b/llvm/test/CodeGen/AMDGPU/ds-read2-write2-debug-info.ll
new file mode 100644
index 0000000..08730038
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/ds-read2-write2-debug-info.ll
@@ -0,0 +1,89 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes=debugify < %s | llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 | FileCheck %s
+
+@lds = addrspace(3) global [512 x float] poison, align 4
+
+define amdgpu_kernel void @simple_write2_one_val_f32(ptr addrspace(1) %C, ptr addrspace(1) %in) #0 {
+; CHECK-LABEL: simple_write2_one_val_f32:
+; CHECK: .Lfunc_begin0:
+; CHECK-NEXT: .cfi_sections .debug_frame
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: .file 1 "/" "<stdin>"
+; CHECK-NEXT: .loc 1 1 1 prologue_end ; <stdin>:1:1
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:1 <- $vgpr0
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:5 <- [DW_OP_plus_uconst 8, DW_OP_stack_value] $vgpr0
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:3 <- undef
+; CHECK-NEXT: .loc 1 2 1 ; <stdin>:2:1
+; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:4 <- $vgpr0
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:2 <- undef
+; CHECK-NEXT: .loc 1 3 1 ; <stdin>:3:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_load_dword v1, v0, s[0:1]
+; CHECK-NEXT: .Ltmp2:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_write2_one_val_f32:6 <- [DW_OP_plus_uconst 32, DW_OP_stack_value] $vgpr0
+; CHECK-NEXT: .loc 1 0 0 is_stmt 0 ; <stdin>:0
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: ds_write2_b32 v0, v1, v1 offset1:8
+; CHECK-NEXT: .loc 1 9 1 is_stmt 1 ; <stdin>:9:1
+; CHECK-NEXT: s_endpgm
+; CHECK-NEXT: .Ltmp3:
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %in.gep = getelementptr float, ptr addrspace(1) %in, i32 %x.i
+ %val = load float, ptr addrspace(1) %in.gep, align 4
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ store float %val, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ store float %val, ptr addrspace(3) %arrayidx1, align 4
+ ret void
+}
+
+define amdgpu_kernel void @simple_read2_f32(ptr addrspace(1) %out) #0 {
+; CHECK-LABEL: simple_read2_f32:
+; CHECK: .Lfunc_begin1:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: ; %bb.0:
+; CHECK-NEXT: .loc 1 11 1 prologue_end ; <stdin>:11:1
+; CHECK-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:8 <- $vgpr2
+; CHECK-NEXT: .loc 1 0 0 is_stmt 0 ; <stdin>:0
+; CHECK-NEXT: ds_read2_b32 v[0:1], v2 offset1:8
+; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:9 <- undef
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:11 <- [DW_OP_plus_uconst 32, DW_OP_stack_value] $vgpr2
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:12 <- undef
+; CHECK-NEXT: .loc 1 10 1 is_stmt 1 ; <stdin>:10:1
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; CHECK-NEXT: .Ltmp6:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:7 <- undef
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:10 <- [DW_OP_plus_uconst 8, DW_OP_stack_value] undef
+; CHECK-NEXT: .loc 1 16 1 ; <stdin>:16:1
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_add_f32_e32 v0, v0, v1
+; CHECK-NEXT: .Ltmp7:
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:13 <- $vgpr0
+; CHECK-NEXT: ;DEBUG_VALUE: simple_read2_f32:14 <- undef
+; CHECK-NEXT: .loc 1 18 1 ; <stdin>:18:1
+; CHECK-NEXT: global_store_dword v2, v0, s[0:1]
+; CHECK-NEXT: .loc 1 19 1 ; <stdin>:19:1
+; CHECK-NEXT: s_endpgm
+; CHECK-NEXT: .Ltmp8:
+ %x.i = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+ %arrayidx0 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %x.i
+ %val0 = load float, ptr addrspace(3) %arrayidx0, align 4
+ %add.x = add nsw i32 %x.i, 8
+ %arrayidx1 = getelementptr inbounds [512 x float], ptr addrspace(3) @lds, i32 0, i32 %add.x
+ %val1 = load float, ptr addrspace(3) %arrayidx1, align 4
+ %sum = fadd float %val0, %val1
+ %out.gep = getelementptr inbounds float, ptr addrspace(1) %out, i32 %x.i
+ store float %sum, ptr addrspace(1) %out.gep, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/fabs.ll b/llvm/test/CodeGen/AMDGPU/fabs.ll
index 6bcb086..97e23fc 100644
--- a/llvm/test/CodeGen/AMDGPU/fabs.ll
+++ b/llvm/test/CodeGen/AMDGPU/fabs.ll
@@ -99,16 +99,15 @@ define amdgpu_kernel void @fabs_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
; SI-LABEL: fabs_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_and_b32 s0, s3, 0x7fffffff
-; SI-NEXT: s_and_b32 s1, s2, 0x7fffffff
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_bitset0_b32 s5, 31
+; SI-NEXT: s_bitset0_b32 s4, 31
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fabs_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.ll b/llvm/test/CodeGen/AMDGPU/fdiv.ll
index b826e6c..4d448e6 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.ll
@@ -333,18 +333,17 @@ define amdgpu_kernel void @s_fdiv_25ulp_f32(ptr addrspace(1) %out, float %a, flo
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX67-NEXT: v_mov_b32_e32 v0, 0x6f800000
; GFX67-NEXT: v_mov_b32_e32 v1, 0x2f800000
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_cmp_gt_f32_e64 vcc, |s3|, v0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_cmp_gt_f32_e64 vcc, |s5|, v0
; GFX67-NEXT: v_cndmask_b32_e32 v0, 1.0, v1, vcc
-; GFX67-NEXT: v_mul_f32_e32 v1, s3, v0
+; GFX67-NEXT: v_mul_f32_e32 v1, s5, v0
; GFX67-NEXT: v_rcp_f32_e32 v1, v1
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v1, s2, v1
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v1, s4, v1
; GFX67-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_25ulp_f32:
@@ -441,20 +440,19 @@ define amdgpu_kernel void @s_fdiv_25ulp_ieee_f32(ptr addrspace(1) %out, float %a
; GFX7-LABEL: s_fdiv_25ulp_ieee_f32:
; GFX7: ; %bb.0: ; %entry
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX7-NEXT: s_mov_b32 s7, 0xf000
-; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_frexp_mant_f32_e32 v0, s3
+; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX7-NEXT: v_frexp_mant_f32_e32 v0, s5
; GFX7-NEXT: v_rcp_f32_e32 v0, v0
-; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v1, s3
-; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v2, s2
-; GFX7-NEXT: v_frexp_mant_f32_e32 v3, s2
+; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v1, s5
+; GFX7-NEXT: v_frexp_exp_i32_f32_e32 v2, s4
+; GFX7-NEXT: v_frexp_mant_f32_e32 v3, s4
; GFX7-NEXT: v_mul_f32_e32 v0, v3, v0
; GFX7-NEXT: v_sub_i32_e32 v1, vcc, v2, v1
-; GFX7-NEXT: s_mov_b32 s4, s0
-; GFX7-NEXT: s_mov_b32 s5, s1
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
+; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: v_ldexp_f32_e32 v0, v0, v1
-; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_25ulp_ieee_f32:
@@ -528,14 +526,13 @@ define amdgpu_kernel void @s_fdiv_fast_ieee_f32(ptr addrspace(1) %out, float %a,
; GFX67-LABEL: s_fdiv_fast_ieee_f32:
; GFX67: ; %bb.0: ; %entry
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_rcp_f32_e32 v0, s3
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_rcp_f32_e32 v0, s5
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_fast_ieee_f32:
@@ -590,14 +587,13 @@ define amdgpu_kernel void @s_fdiv_f32_fast_math(ptr addrspace(1) %out, float %a,
; GFX67-LABEL: s_fdiv_f32_fast_math:
; GFX67: ; %bb.0: ; %entry
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_rcp_f32_e32 v0, s3
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_rcp_f32_e32 v0, s5
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_f32_fast_math:
@@ -652,14 +648,13 @@ define amdgpu_kernel void @s_fdiv_ulp25_f32_fast_math(ptr addrspace(1) %out, flo
; GFX67-LABEL: s_fdiv_ulp25_f32_fast_math:
; GFX67: ; %bb.0: ; %entry
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_rcp_f32_e32 v0, s3
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_rcp_f32_e32 v0, s5
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_ulp25_f32_fast_math:
@@ -877,14 +872,13 @@ define amdgpu_kernel void @s_fdiv_f32_arcp_ninf(ptr addrspace(1) %out, float %a,
; GFX67-LABEL: s_fdiv_f32_arcp_ninf:
; GFX67: ; %bb.0: ; %entry
; GFX67-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX67-NEXT: s_mov_b32 s7, 0xf000
-; GFX67-NEXT: s_mov_b32 s6, -1
; GFX67-NEXT: s_waitcnt lgkmcnt(0)
-; GFX67-NEXT: v_rcp_f32_e32 v0, s3
-; GFX67-NEXT: s_mov_b32 s4, s0
-; GFX67-NEXT: s_mov_b32 s5, s1
-; GFX67-NEXT: v_mul_f32_e32 v0, s2, v0
-; GFX67-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX67-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX67-NEXT: v_rcp_f32_e32 v0, s5
+; GFX67-NEXT: s_mov_b32 s3, 0xf000
+; GFX67-NEXT: s_mov_b32 s2, -1
+; GFX67-NEXT: v_mul_f32_e32 v0, s4, v0
+; GFX67-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX67-NEXT: s_endpgm
;
; GFX8-LABEL: s_fdiv_f32_arcp_ninf:
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
index defcffa..39eefa1 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
@@ -75,9 +75,12 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out
; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src:
; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
-; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
-; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
+; SI: s_mov_b64 s[[[#COPY:]]:{{[0-9]+}}], s{{\[}}[[#LOAD + 2]]:[[#LOAD + 3]]{{\]}}
+; SI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#COPY]], 1.0
+; SI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#COPY + 1]], 2.0
+; VI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
+; VI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
@@ -96,8 +99,12 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1)
; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src_fast:
; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
-; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
-; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
+; SI: s_mov_b64 s[[[#COPY:]]:{{[0-9]+}}], s{{\[}}[[#LOAD + 2]]:[[#LOAD + 3]]{{\]}}
+; SI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#COPY]], 1.0
+; SI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#COPY + 1]], 2.0
+
+; VI-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
+; VI-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
; GCN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src_fast(ptr addrspace(1) %out, float %a, float %b) #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
index a025c36..6c2ab5f 100644
--- a/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fnearbyint.ll
@@ -121,14 +121,13 @@ define amdgpu_kernel void @fnearbyint_v2f32(ptr addrspace(1) %out, <2 x float> %
; SICI-LABEL: fnearbyint_v2f32:
; SICI: ; %bb.0: ; %entry
; SICI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SICI-NEXT: s_mov_b32 s7, 0xf000
-; SICI-NEXT: s_mov_b32 s6, -1
; SICI-NEXT: s_waitcnt lgkmcnt(0)
-; SICI-NEXT: s_mov_b32 s4, s0
-; SICI-NEXT: s_mov_b32 s5, s1
-; SICI-NEXT: v_rndne_f32_e32 v1, s3
-; SICI-NEXT: v_rndne_f32_e32 v0, s2
-; SICI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SICI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SICI-NEXT: s_mov_b32 s3, 0xf000
+; SICI-NEXT: s_mov_b32 s2, -1
+; SICI-NEXT: v_rndne_f32_e32 v1, s5
+; SICI-NEXT: v_rndne_f32_e32 v0, s4
+; SICI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SICI-NEXT: s_endpgm
;
; VI-LABEL: fnearbyint_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll
index 1fa9bfa..214cced 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.ll
@@ -199,16 +199,15 @@ define amdgpu_kernel void @fneg_fabsf_v2f32(ptr addrspace(1) %out, <2 x float> %
; SI-LABEL: fneg_fabsf_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_bitset1_b32 s3, 31
-; SI-NEXT: s_bitset1_b32 s2, 31
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: v_mov_b32_e32 v1, s3
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_bitset1_b32 s5, 31
+; SI-NEXT: s_bitset1_b32 s4, 31
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fneg_fabsf_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fneg.ll b/llvm/test/CodeGen/AMDGPU/fneg.ll
index c3f4ebe3..0223515 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg.ll
@@ -52,16 +52,15 @@ define amdgpu_kernel void @s_fneg_v2f32(ptr addrspace(1) nocapture %out, <2 x fl
; SI-LABEL: s_fneg_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_xor_b32 s0, s3, 0x80000000
-; SI-NEXT: s_xor_b32 s1, s2, 0x80000000
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_xor_b32 s5, s5, 0x80000000
+; SI-NEXT: s_xor_b32 s4, s4, 0x80000000
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_fneg_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
index 7ab8b30..0c5ed00 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
@@ -88,27 +88,24 @@ define amdgpu_kernel void @fp_to_sint_v2i32(ptr addrspace(1) %out, <2 x float> %
; SI-LABEL: fp_to_sint_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_cvt_i32_f32_e32 v1, s3
-; SI-NEXT: v_cvt_i32_f32_e32 v0, s2
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_cvt_i32_f32_e32 v1, s5
+; SI-NEXT: v_cvt_i32_f32_e32 v0, s4
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fp_to_sint_v2i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cvt_i32_f32_e32 v1, s3
; VI-NEXT: v_cvt_i32_f32_e32 v0, s2
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; EG-LABEL: fp_to_sint_v2i32:
@@ -294,26 +291,25 @@ entry:
define amdgpu_kernel void @fp_to_sint_v2i64(ptr addrspace(1) %out, <2 x float> %x) {
; SI-LABEL: fp_to_sint_v2i64:
; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s6, 0x2f800000
+; SI-NEXT: s_mov_b32 s7, 0xcf800000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: s_mov_b32 s8, 0x2f800000
-; SI-NEXT: s_mov_b32 s9, 0xcf800000
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s0, s4
-; SI-NEXT: s_mov_b32 s1, s5
-; SI-NEXT: v_trunc_f32_e32 v0, s7
-; SI-NEXT: v_trunc_f32_e32 v1, s6
-; SI-NEXT: v_mul_f32_e64 v2, |v0|, s8
+; SI-NEXT: v_trunc_f32_e32 v0, s5
+; SI-NEXT: v_trunc_f32_e32 v1, s4
+; SI-NEXT: v_mul_f32_e64 v2, |v0|, s6
; SI-NEXT: v_ashrrev_i32_e32 v3, 31, v0
-; SI-NEXT: v_mul_f32_e64 v4, |v1|, s8
+; SI-NEXT: v_mul_f32_e64 v4, |v1|, s6
; SI-NEXT: v_ashrrev_i32_e32 v5, 31, v1
; SI-NEXT: v_floor_f32_e32 v2, v2
; SI-NEXT: v_floor_f32_e32 v4, v4
; SI-NEXT: v_cvt_u32_f32_e32 v6, v2
-; SI-NEXT: v_fma_f32 v0, v2, s9, |v0|
+; SI-NEXT: v_fma_f32 v0, v2, s7, |v0|
; SI-NEXT: v_cvt_u32_f32_e32 v2, v4
-; SI-NEXT: v_fma_f32 v1, v4, s9, |v1|
+; SI-NEXT: v_fma_f32 v1, v4, s7, |v1|
; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
; SI-NEXT: v_xor_b32_e32 v4, v6, v3
; SI-NEXT: v_cvt_u32_f32_e32 v1, v1
@@ -330,36 +326,35 @@ define amdgpu_kernel void @fp_to_sint_v2i64(ptr addrspace(1) %out, <2 x float> %
; VI-LABEL: fp_to_sint_v2i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s8, 0x2f800000
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s6, 0x2f800000
+; VI-NEXT: s_mov_b32 s7, 0xcf800000
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_trunc_f32_e32 v0, s3
-; VI-NEXT: v_mul_f32_e64 v1, |v0|, s8
-; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: v_trunc_f32_e32 v0, s5
+; VI-NEXT: v_mul_f32_e64 v1, |v0|, s6
; VI-NEXT: v_floor_f32_e32 v1, v1
-; VI-NEXT: s_mov_b32 s0, 0xcf800000
-; VI-NEXT: v_fma_f32 v2, v1, s0, |v0|
-; VI-NEXT: v_trunc_f32_e32 v4, s2
-; VI-NEXT: v_cvt_u32_f32_e32 v2, v2
-; VI-NEXT: v_mul_f32_e64 v3, |v4|, s8
-; VI-NEXT: v_cvt_u32_f32_e32 v1, v1
-; VI-NEXT: v_floor_f32_e32 v3, v3
-; VI-NEXT: v_cvt_u32_f32_e32 v5, v3
-; VI-NEXT: v_fma_f32 v3, v3, s0, |v4|
+; VI-NEXT: v_cvt_u32_f32_e32 v2, v1
+; VI-NEXT: v_fma_f32 v1, v1, s7, |v0|
; VI-NEXT: v_ashrrev_i32_e32 v0, 31, v0
-; VI-NEXT: v_cvt_u32_f32_e32 v6, v3
-; VI-NEXT: v_xor_b32_e32 v2, v2, v0
+; VI-NEXT: v_trunc_f32_e32 v4, s4
+; VI-NEXT: v_xor_b32_e32 v3, v2, v0
+; VI-NEXT: v_mul_f32_e64 v2, |v4|, s6
+; VI-NEXT: v_cvt_u32_f32_e32 v1, v1
+; VI-NEXT: v_floor_f32_e32 v2, v2
+; VI-NEXT: v_cvt_u32_f32_e32 v5, v2
+; VI-NEXT: v_fma_f32 v2, v2, s7, |v4|
+; VI-NEXT: v_cvt_u32_f32_e32 v6, v2
; VI-NEXT: v_xor_b32_e32 v1, v1, v0
-; VI-NEXT: v_sub_u32_e32 v2, vcc, v2, v0
-; VI-NEXT: v_subb_u32_e32 v3, vcc, v1, v0, vcc
+; VI-NEXT: v_sub_u32_e32 v2, vcc, v1, v0
; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v4
+; VI-NEXT: v_subb_u32_e32 v3, vcc, v3, v0, vcc
; VI-NEXT: v_xor_b32_e32 v0, v6, v1
; VI-NEXT: v_xor_b32_e32 v4, v5, v1
; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
-; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_subb_u32_e32 v1, vcc, v4, v1, vcc
-; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; EG-LABEL: fp_to_sint_v2i64:
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
index 5428ba8..c938475 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
@@ -48,27 +48,24 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i32(ptr addrspace(1) %out, <2 x
; SI-LABEL: fp_to_uint_v2f32_to_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_cvt_u32_f32_e32 v1, s3
-; SI-NEXT: v_cvt_u32_f32_e32 v0, s2
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_cvt_u32_f32_e32 v1, s5
+; SI-NEXT: v_cvt_u32_f32_e32 v0, s4
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fp_to_uint_v2f32_to_v2i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cvt_u32_f32_e32 v1, s3
; VI-NEXT: v_cvt_u32_f32_e32 v0, s2
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; EG-LABEL: fp_to_uint_v2f32_to_v2i32:
@@ -241,32 +238,29 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(ptr addrspace(1) %out, <2 x
; SI-LABEL: fp_to_uint_v2f32_to_v2i64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s8, 0xcf800000
+; SI-NEXT: s_mov_b32 s6, 0xcf800000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_trunc_f32_e32 v0, s3
-; SI-NEXT: v_trunc_f32_e32 v2, s2
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_trunc_f32_e32 v0, s5
+; SI-NEXT: v_trunc_f32_e32 v2, s4
; SI-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; SI-NEXT: v_mul_f32_e32 v3, 0x2f800000, v2
; SI-NEXT: v_floor_f32_e32 v4, v1
; SI-NEXT: v_floor_f32_e32 v5, v3
; SI-NEXT: v_cvt_u32_f32_e32 v3, v4
; SI-NEXT: v_cvt_u32_f32_e32 v1, v5
-; SI-NEXT: v_fma_f32 v0, v4, s8, v0
-; SI-NEXT: v_fma_f32 v4, v5, s8, v2
+; SI-NEXT: v_fma_f32 v0, v4, s6, v0
+; SI-NEXT: v_fma_f32 v4, v5, s6, v2
; SI-NEXT: v_cvt_u32_f32_e32 v2, v0
; SI-NEXT: v_cvt_u32_f32_e32 v0, v4
-; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: fp_to_uint_v2f32_to_v2i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_trunc_f32_e32 v0, s3
; VI-NEXT: v_trunc_f32_e32 v4, s2
@@ -281,9 +275,9 @@ define amdgpu_kernel void @fp_to_uint_v2f32_to_v2i64(ptr addrspace(1) %out, <2 x
; VI-NEXT: v_cvt_u32_f32_e32 v3, v5
; VI-NEXT: v_cvt_u32_f32_e32 v1, v6
; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; EG-LABEL: fp_to_uint_v2f32_to_v2i64:
diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
index ed1ee45..68b95cd 100644
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -691,17 +691,16 @@ define amdgpu_kernel void @orxor2or1(ptr addrspace(1) %in, i32 %a, i32 %b) {
; SI-LABEL: orxor2or1:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_lshl_b32 s0, s2, 7
-; SI-NEXT: s_or_b32 s0, s3, s0
-; SI-NEXT: s_cmp_eq_u32 s0, 0
-; SI-NEXT: s_cselect_b32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_lshl_b32 s6, s4, 7
+; SI-NEXT: s_or_b32 s6, s5, s6
+; SI-NEXT: s_cmp_eq_u32 s6, 0
+; SI-NEXT: s_cselect_b32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: orxor2or1:
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 44bd409..7cbf9ae 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1508,35 +1508,33 @@ define amdgpu_kernel void @dynamic_insertelement_v2i16(ptr addrspace(1) %out, <2
; SI-LABEL: dynamic_insertelement_v2i16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; SI-NEXT: s_mov_b32 s7, 0x100f000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_lshl_b32 s1, s3, 4
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_xor_b32 s0, s2, 0x50005
-; SI-NEXT: s_lshl_b32 s1, 0xffff, s1
-; SI-NEXT: s_and_b32 s0, s0, s1
-; SI-NEXT: s_xor_b32 s0, s0, s2
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_lshl_b32 s5, s5, 4
+; SI-NEXT: s_xor_b32 s6, s4, 0x50005
+; SI-NEXT: s_lshl_b32 s5, 0xffff, s5
+; SI-NEXT: s_and_b32 s5, s6, s5
+; SI-NEXT: s_xor_b32 s4, s5, s4
+; SI-NEXT: s_mov_b32 s3, 0x100f000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: dynamic_insertelement_v2i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; VI-NEXT: s_mov_b32 s7, 0x1100f000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_lshl_b32 s1, s3, 4
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_xor_b32 s0, s2, 0x50005
-; VI-NEXT: s_lshl_b32 s1, 0xffff, s1
-; VI-NEXT: s_and_b32 s0, s0, s1
-; VI-NEXT: s_xor_b32 s0, s0, s2
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_lshl_b32 s5, s5, 4
+; VI-NEXT: s_xor_b32 s6, s4, 0x50005
+; VI-NEXT: s_lshl_b32 s5, 0xffff, s5
+; VI-NEXT: s_and_b32 s5, s6, s5
+; VI-NEXT: s_xor_b32 s4, s5, s4
+; VI-NEXT: s_mov_b32 s3, 0x1100f000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%vecins = insertelement <2 x i16> %a, i16 5, i32 %b
store <2 x i16> %vecins, ptr addrspace(1) %out, align 8
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
index d4aa2051..e421e2c 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ubfe.ll
@@ -1612,29 +1612,27 @@ define amdgpu_kernel void @v_lshr_and(ptr addrspace(1) %out, i32 %a, i32 %b) #0
; SI-LABEL: v_lshr_and:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_lshr_b32 s2, s2, s3
-; SI-NEXT: s_and_b32 s2, s2, 7
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_lshr_b32 s2, s4, s5
+; SI-NEXT: s_and_b32 s4, s2, 7
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: v_lshr_and:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_lshr_b32 s0, s2, s3
-; VI-NEXT: s_and_b32 s0, s0, 7
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_lshr_b32 s4, s4, s5
+; VI-NEXT: s_and_b32 s4, s4, 7
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%c = lshr i32 %a, %b
%d = and i32 %c, 7
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
index ac356fa..3897a0e 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp.ll
@@ -520,42 +520,41 @@ define amdgpu_kernel void @s_exp_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
;
; SI-SDAG-LABEL: s_exp_v2f32:
; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x3fb8aa3b
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x32a5705f
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_mul_f32_e32 v2, s7, v0
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_mul_f32_e32 v2, s5, v0
; SI-SDAG-NEXT: v_rndne_f32_e32 v3, v2
-; SI-SDAG-NEXT: v_fma_f32 v4, s7, v0, -v2
+; SI-SDAG-NEXT: v_fma_f32 v4, s5, v0, -v2
; SI-SDAG-NEXT: v_sub_f32_e32 v2, v2, v3
-; SI-SDAG-NEXT: v_fma_f32 v4, s7, v1, v4
+; SI-SDAG-NEXT: v_fma_f32 v4, s5, v1, v4
; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v4
-; SI-SDAG-NEXT: v_mul_f32_e32 v5, s6, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
+; SI-SDAG-NEXT: v_mul_f32_e32 v5, s4, v0
; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v3, v3
+; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
; SI-SDAG-NEXT: v_rndne_f32_e32 v6, v5
-; SI-SDAG-NEXT: v_fma_f32 v0, s6, v0, -v5
+; SI-SDAG-NEXT: v_fma_f32 v0, s4, v0, -v5
; SI-SDAG-NEXT: v_sub_f32_e32 v7, v5, v6
-; SI-SDAG-NEXT: v_fma_f32 v0, s6, v1, v0
+; SI-SDAG-NEXT: v_fma_f32 v0, s4, v1, v0
; SI-SDAG-NEXT: v_add_f32_e32 v0, v7, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v5, v6
; SI-SDAG-NEXT: v_ldexp_f32_e32 v2, v2, v3
; SI-SDAG-NEXT: v_mov_b32_e32 v3, 0xc2ce8ed0
-; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v3
+; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s5, v3
; SI-SDAG-NEXT: v_mov_b32_e32 v4, 0x42b17218
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
; SI-SDAG-NEXT: v_mov_b32_e32 v6, 0x7f800000
-; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s7, v4
+; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s5, v4
; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc
; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v5
-; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v3
+; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s4, v3
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v4
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
+; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s4, v4
+; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-SDAG-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
index d12ebe4..3928ec2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp10.ll
@@ -522,42 +522,41 @@ define amdgpu_kernel void @s_exp10_v2f32(ptr addrspace(1) %out, <2 x float> %in)
;
; SI-SDAG-LABEL: s_exp10_v2f32:
; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x40549a78
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x33979a37
-; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_mul_f32_e32 v2, s7, v0
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_mul_f32_e32 v2, s5, v0
; SI-SDAG-NEXT: v_rndne_f32_e32 v3, v2
-; SI-SDAG-NEXT: v_fma_f32 v4, s7, v0, -v2
+; SI-SDAG-NEXT: v_fma_f32 v4, s5, v0, -v2
; SI-SDAG-NEXT: v_sub_f32_e32 v2, v2, v3
-; SI-SDAG-NEXT: v_fma_f32 v4, s7, v1, v4
+; SI-SDAG-NEXT: v_fma_f32 v4, s5, v1, v4
; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v4
-; SI-SDAG-NEXT: v_mul_f32_e32 v5, s6, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
+; SI-SDAG-NEXT: v_mul_f32_e32 v5, s4, v0
; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v3, v3
+; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
; SI-SDAG-NEXT: v_rndne_f32_e32 v6, v5
-; SI-SDAG-NEXT: v_fma_f32 v0, s6, v0, -v5
+; SI-SDAG-NEXT: v_fma_f32 v0, s4, v0, -v5
; SI-SDAG-NEXT: v_sub_f32_e32 v7, v5, v6
-; SI-SDAG-NEXT: v_fma_f32 v0, s6, v1, v0
+; SI-SDAG-NEXT: v_fma_f32 v0, s4, v1, v0
; SI-SDAG-NEXT: v_add_f32_e32 v0, v7, v0
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
; SI-SDAG-NEXT: v_cvt_i32_f32_e32 v5, v6
; SI-SDAG-NEXT: v_ldexp_f32_e32 v2, v2, v3
; SI-SDAG-NEXT: v_mov_b32_e32 v3, 0xc23369f4
-; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s7, v3
+; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s5, v3
; SI-SDAG-NEXT: v_mov_b32_e32 v4, 0x421a209b
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc
; SI-SDAG-NEXT: v_mov_b32_e32 v6, 0x7f800000
-; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s7, v4
+; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s5, v4
; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v6, v2, vcc
; SI-SDAG-NEXT: v_ldexp_f32_e32 v0, v0, v5
-; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s6, v3
+; SI-SDAG-NEXT: v_cmp_nlt_f32_e32 vcc, s4, v3
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
-; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s6, v4
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
+; SI-SDAG-NEXT: v_cmp_ngt_f32_e32 vcc, s4, v4
+; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-SDAG-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
index e30a586..dd44a1a 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
@@ -176,26 +176,25 @@ define amdgpu_kernel void @s_exp2_v2f32(ptr addrspace(1) %out, <2 x float> %in)
; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0xc2fc0000
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x42800000
-; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s6, -1
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s3, v0
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc
-; SI-SDAG-NEXT: s_mov_b32 s4, s0
-; SI-SDAG-NEXT: s_mov_b32 s5, s1
-; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
-; SI-SDAG-NEXT: v_add_f32_e32 v2, s3, v2
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s2, v0
-; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
+; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s4, v0
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; SI-SDAG-NEXT: v_add_f32_e32 v0, s2, v0
+; SI-SDAG-NEXT: v_add_f32_e32 v2, s5, v2
+; SI-SDAG-NEXT: v_add_f32_e32 v0, s4, v0
+; SI-SDAG-NEXT: v_exp_f32_e32 v2, v2
; SI-SDAG-NEXT: v_exp_f32_e32 v0, v0
-; SI-SDAG-NEXT: s_cselect_b32 s0, 0xffffffc0, 0
-; SI-SDAG-NEXT: v_ldexp_f32_e64 v1, v2, s0
-; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
-; SI-SDAG-NEXT: s_cselect_b32 s0, 0xffffffc0, 0
-; SI-SDAG-NEXT: v_ldexp_f32_e64 v0, v0, s0
-; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-SDAG-NEXT: s_cselect_b32 s6, 0xffffffc0, 0
+; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-SDAG-NEXT: s_cselect_b32 s4, 0xffffffc0, 0
+; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s2, -1
+; SI-SDAG-NEXT: v_ldexp_f32_e64 v1, v2, s6
+; SI-SDAG-NEXT: v_ldexp_f32_e64 v0, v0, s4
+; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: s_exp2_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log.ll b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
index b5038c8..fc6b2d9 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log.ll
@@ -321,39 +321,38 @@ define amdgpu_kernel void @s_log_f32(ptr addrspace(1) %out, float %in) {
define amdgpu_kernel void @s_log_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
; SI-SDAG-LABEL: s_log_v2f32:
; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x41b17218
-; SI-SDAG-NEXT: s_mov_b32 s8, 0x3377d1cf
+; SI-SDAG-NEXT: s_mov_b32 s8, 0x3f317217
; SI-SDAG-NEXT: s_mov_b32 s9, 0x7f800000
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s7, v0
-; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
-; SI-SDAG-NEXT: s_cselect_b32 s0, 32, 0
-; SI-SDAG-NEXT: v_mov_b32_e32 v3, s0
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s7, v3
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0
+; SI-SDAG-NEXT: s_and_b64 s[2:3], vcc, exec
+; SI-SDAG-NEXT: s_cselect_b32 s2, 32, 0
+; SI-SDAG-NEXT: v_mov_b32_e32 v3, s2
+; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s5, v3
; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s6, v0
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-SDAG-NEXT: s_mov_b32 s7, 0x3f317217
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s4, v0
+; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-SDAG-NEXT: v_mul_f32_e32 v4, 0x3f317217, v3
-; SI-SDAG-NEXT: s_cselect_b32 s4, 32, 0
-; SI-SDAG-NEXT: v_fma_f32 v5, v3, s7, -v4
+; SI-SDAG-NEXT: s_cselect_b32 s6, 32, 0
+; SI-SDAG-NEXT: s_mov_b32 s5, 0x3377d1cf
+; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, -v4
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, s4
-; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, v5
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s6, v1
+; SI-SDAG-NEXT: v_mov_b32_e32 v1, s6
+; SI-SDAG-NEXT: v_fma_f32 v5, v3, s5, v5
+; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s4, v1
; SI-SDAG-NEXT: v_add_f32_e32 v4, v4, v5
; SI-SDAG-NEXT: v_log_f32_e32 v5, v1
; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v3|, s9
; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; SI-SDAG-NEXT: v_sub_f32_e32 v1, v1, v2
; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3f317217, v5
-; SI-SDAG-NEXT: v_fma_f32 v3, v5, s7, -v2
-; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, v3
+; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, -v2
+; SI-SDAG-NEXT: v_fma_f32 v3, v5, s5, v3
; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v3
; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v5|, s9
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
index 7465b49..a141bce 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log10.ll
@@ -321,39 +321,38 @@ define amdgpu_kernel void @s_log10_f32(ptr addrspace(1) %out, float %in) {
define amdgpu_kernel void @s_log10_v2f32(ptr addrspace(1) %out, <2 x float> %in) {
; SI-SDAG-LABEL: s_log10_v2f32:
; SI-SDAG: ; %bb.0:
-; SI-SDAG-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x9
+; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x411a209b
-; SI-SDAG-NEXT: s_mov_b32 s8, 0x3284fbcf
+; SI-SDAG-NEXT: s_mov_b32 s8, 0x3e9a209a
; SI-SDAG-NEXT: s_mov_b32 s9, 0x7f800000
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s7, v0
-; SI-SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
-; SI-SDAG-NEXT: s_cselect_b32 s0, 32, 0
-; SI-SDAG-NEXT: v_mov_b32_e32 v3, s0
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s7, v3
+; SI-SDAG-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s5, v0
+; SI-SDAG-NEXT: s_and_b64 s[2:3], vcc, exec
+; SI-SDAG-NEXT: s_cselect_b32 s2, 32, 0
+; SI-SDAG-NEXT: v_mov_b32_e32 v3, s2
+; SI-SDAG-NEXT: v_ldexp_f32_e32 v3, s5, v3
; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v1, vcc
-; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s6, v0
-; SI-SDAG-NEXT: s_mov_b32 s0, s4
-; SI-SDAG-NEXT: s_mov_b32 s1, s5
-; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-SDAG-NEXT: s_mov_b32 s7, 0x3e9a209a
+; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s4, v0
+; SI-SDAG-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-SDAG-NEXT: v_mul_f32_e32 v4, 0x3e9a209a, v3
-; SI-SDAG-NEXT: s_cselect_b32 s4, 32, 0
-; SI-SDAG-NEXT: v_fma_f32 v5, v3, s7, -v4
+; SI-SDAG-NEXT: s_cselect_b32 s6, 32, 0
+; SI-SDAG-NEXT: s_mov_b32 s5, 0x3284fbcf
+; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, -v4
; SI-SDAG-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; SI-SDAG-NEXT: v_mov_b32_e32 v1, s4
-; SI-SDAG-NEXT: v_fma_f32 v5, v3, s8, v5
-; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s6, v1
+; SI-SDAG-NEXT: v_mov_b32_e32 v1, s6
+; SI-SDAG-NEXT: v_fma_f32 v5, v3, s5, v5
+; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s4, v1
; SI-SDAG-NEXT: v_add_f32_e32 v4, v4, v5
; SI-SDAG-NEXT: v_log_f32_e32 v5, v1
; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v3|, s9
; SI-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
; SI-SDAG-NEXT: v_sub_f32_e32 v1, v1, v2
; SI-SDAG-NEXT: v_mul_f32_e32 v2, 0x3e9a209a, v5
-; SI-SDAG-NEXT: v_fma_f32 v3, v5, s7, -v2
-; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, v3
+; SI-SDAG-NEXT: v_fma_f32 v3, v5, s8, -v2
+; SI-SDAG-NEXT: v_fma_f32 v3, v5, s5, v3
; SI-SDAG-NEXT: v_add_f32_e32 v2, v2, v3
; SI-SDAG-NEXT: v_cmp_lt_f32_e64 vcc, |v5|, s9
; SI-SDAG-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
index 61a777f..b1407d3 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
@@ -221,8 +221,6 @@ define amdgpu_kernel void @s_log2_v2f32(ptr addrspace(1) %out, <2 x float> %in)
; SI-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-SDAG-NEXT: v_mov_b32_e32 v0, 0x800000
; SI-SDAG-NEXT: v_mov_b32_e32 v1, 0x42000000
-; SI-SDAG-NEXT: s_mov_b32 s7, 0xf000
-; SI-SDAG-NEXT: s_mov_b32 s6, -1
; SI-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; SI-SDAG-NEXT: v_cmp_lt_f32_e32 vcc, s3, v0
; SI-SDAG-NEXT: s_and_b64 s[4:5], vcc, exec
@@ -238,11 +236,11 @@ define amdgpu_kernel void @s_log2_v2f32(ptr addrspace(1) %out, <2 x float> %in)
; SI-SDAG-NEXT: v_ldexp_f32_e32 v1, s2, v1
; SI-SDAG-NEXT: v_log_f32_e32 v3, v3
; SI-SDAG-NEXT: v_log_f32_e32 v4, v1
-; SI-SDAG-NEXT: s_mov_b32 s4, s0
-; SI-SDAG-NEXT: s_mov_b32 s5, s1
+; SI-SDAG-NEXT: s_mov_b32 s3, 0xf000
+; SI-SDAG-NEXT: s_mov_b32 s2, -1
; SI-SDAG-NEXT: v_sub_f32_e32 v1, v3, v2
; SI-SDAG-NEXT: v_sub_f32_e32 v0, v4, v0
-; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-SDAG-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-SDAG-NEXT: s_endpgm
;
; SI-GISEL-LABEL: s_log2_v2f32:
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 6dc9199..b6eaaf1 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -326,12 +326,12 @@ define void @local_atomic_fadd_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB2_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -347,12 +347,12 @@ define void @local_atomic_fadd_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB2_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -440,12 +440,12 @@ define void @local_atomic_fadd_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB3_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -462,12 +462,12 @@ define void @local_atomic_fadd_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB3_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -880,13 +880,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], 4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], 4.0, v[3:4]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -913,13 +914,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -936,14 +938,14 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB6_1
@@ -968,13 +970,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB6_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -990,13 +992,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB6_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1012,13 +1014,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB6_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1034,13 +1036,13 @@ define void @local_atomic_fadd_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v3, v1
+; GFX6-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v2, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB6_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1063,13 +1065,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], 4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], 4.0, v[3:4]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -1096,13 +1099,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1119,14 +1123,14 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB7_1
@@ -1151,13 +1155,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1173,13 +1177,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1195,13 +1199,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], 4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], 4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB7_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1218,13 +1222,13 @@ define void @local_atomic_fadd_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[0:1], 4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v2, v[0:1], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v1
+; GFX6-NEXT: v_mov_b32_e32 v3, v0
+; GFX6-NEXT: v_add_f64 v[0:1], v[3:4], 4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[0:1]
-; GFX6-NEXT: v_mov_b32_e32 v0, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB7_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2032,27 +2036,27 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2073,28 +2077,28 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2119,15 +2123,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB10_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2140,27 +2144,27 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2175,28 +2179,28 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2211,23 +2215,23 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -2249,15 +2253,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2278,15 +2282,15 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2308,16 +2312,16 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2338,18 +2342,18 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2370,18 +2374,18 @@ define void @local_atomic_fadd_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB10_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2412,19 +2416,19 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2455,19 +2459,20 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2493,15 +2498,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB11_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2524,19 +2529,19 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, 4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2561,19 +2566,20 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, 4.0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2596,16 +2602,16 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -2628,15 +2634,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2658,15 +2664,15 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2689,16 +2695,16 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_add_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_add_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2720,18 +2726,18 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2753,18 +2759,18 @@ define void @local_atomic_fadd_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3086,16 +3092,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v2.l, 4.0, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, 4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3118,16 +3124,17 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -3147,13 +3154,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB13_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3168,16 +3175,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v2.l, 4.0, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, 4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3194,16 +3201,17 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, 4.0, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3220,15 +3228,15 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -3245,13 +3253,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3267,13 +3275,13 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB13_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3289,14 +3297,14 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_e32 v2, 4.0, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f16_e32 v1, 4.0, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB13_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3312,16 +3320,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB13_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3338,16 +3346,16 @@ define void @local_atomic_fadd_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB13_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4297,38 +4305,38 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4349,37 +4357,37 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4405,22 +4413,22 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB16_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4433,38 +4441,38 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4479,37 +4487,37 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4524,28 +4532,28 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -4568,20 +4576,20 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4603,20 +4611,20 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB16_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4638,22 +4646,22 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB16_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4674,18 +4682,18 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4706,18 +4714,18 @@ define void @local_atomic_fadd_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4748,29 +4756,30 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4801,28 +4810,29 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4849,22 +4859,22 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB17_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4888,28 +4898,29 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4935,27 +4946,28 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4978,21 +4990,21 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -5016,20 +5028,20 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5052,20 +5064,20 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB17_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5088,22 +5100,22 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB17_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5125,18 +5137,18 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5158,18 +5170,18 @@ define void @local_atomic_fadd_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_add_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5569,26 +5581,27 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5611,25 +5624,26 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5650,21 +5664,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB19_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5680,25 +5694,26 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5716,24 +5731,25 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5750,21 +5766,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -5782,20 +5798,20 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5812,20 +5828,20 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB19_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5841,21 +5857,21 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB19_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5871,16 +5887,16 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB19_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5897,16 +5913,16 @@ define void @local_atomic_fadd_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB19_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6399,13 +6415,14 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6422,13 +6439,13 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6444,12 +6461,12 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6464,12 +6481,12 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB22_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6485,14 +6502,14 @@ define void @local_atomic_fadd_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_add_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB22_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6614,13 +6631,14 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6637,13 +6655,13 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -6659,12 +6677,12 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6679,12 +6697,12 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB23_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6700,14 +6718,14 @@ define void @local_atomic_fadd_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_add_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_add_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB23_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7547,30 +7565,32 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7591,30 +7611,32 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7634,27 +7656,27 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v2
; GFX10-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -7674,26 +7696,26 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_add_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7712,26 +7734,26 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_add_f32_e32 v3, v3, v2
; GFX908-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB26_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7749,29 +7771,29 @@ define void @local_atomic_fadd_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_add_f32_e32 v3, v3, v2
; GFX8-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB26_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7888,30 +7910,32 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_add_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7932,30 +7956,32 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_add_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7975,27 +8001,27 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_add_f32_e32 v3, v3, v2
; GFX10-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -8015,26 +8041,26 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_add_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8053,26 +8079,26 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_add_f32_e32 v3, v3, v2
; GFX908-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB27_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8090,29 +8116,29 @@ define void @local_atomic_fadd_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_add_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_add_f32_e32 v3, v3, v2
; GFX8-NEXT: v_add_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB27_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8849,20 +8875,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX7-NEXT: ; %bb.5:
; GFX7-NEXT: s_lshl_b32 s0, s3, 4
; GFX7-NEXT: v_mov_b32_e32 v1, s0
-; GFX7-NEXT: ds_read_b32 v3, v1
+; GFX7-NEXT: ds_read_b32 v2, v1
; GFX7-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s0
-; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
+; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s0
+; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB28_6: ; %atomicrmw.start2
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v4, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_add_f32_e32 v2, v4, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4
; GFX7-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB28_6
; GFX7-NEXT: .LBB28_7: ; %Flow21
@@ -8973,20 +8999,20 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX6-NEXT: ; %bb.5:
; GFX6-NEXT: s_lshl_b32 s0, s3, 4
; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: ds_read_b32 v3, v1
+; GFX6-NEXT: ds_read_b32 v2, v1
; GFX6-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s0
-; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
+; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s0
+; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB28_6: ; %atomicrmw.start2
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v4, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_add_f32_e32 v2, v4, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4
; GFX6-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB28_6
; GFX6-NEXT: .LBB28_7: ; %Flow19
@@ -9677,20 +9703,20 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX7-NEXT: ; %bb.5:
; GFX7-NEXT: s_lshl_b32 s0, s3, 4
; GFX7-NEXT: v_mov_b32_e32 v1, s0
-; GFX7-NEXT: ds_read_b32 v3, v1
+; GFX7-NEXT: ds_read_b32 v2, v1
; GFX7-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
-; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v2, s0
-; GFX7-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
+; GFX7-NEXT: v_cvt_f32_ubyte0_e32 v3, s0
+; GFX7-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
; GFX7-NEXT: s_mov_b64 s[8:9], 0
; GFX7-NEXT: .LBB29_6: ; %atomicrmw.start2
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v4, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_add_f32_e32 v2, v4, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4
; GFX7-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX7-NEXT: s_cbranch_execnz .LBB29_6
; GFX7-NEXT: .LBB29_7: ; %Flow21
@@ -9801,20 +9827,20 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX6-NEXT: ; %bb.5:
; GFX6-NEXT: s_lshl_b32 s0, s3, 4
; GFX6-NEXT: v_mov_b32_e32 v1, s0
-; GFX6-NEXT: ds_read_b32 v3, v1
+; GFX6-NEXT: ds_read_b32 v2, v1
; GFX6-NEXT: s_bcnt1_i32_b64 s0, s[8:9]
-; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, s0
-; GFX6-NEXT: v_mul_f32_e32 v2, 0x42280000, v2
+; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, s0
+; GFX6-NEXT: v_mul_f32_e32 v3, 0x42280000, v3
; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: .LBB29_6: ; %atomicrmw.start2
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v4, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_add_f32_e32 v2, v4, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v1, v4, v2
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], v2, v4
; GFX6-NEXT: s_or_b64 s[8:9], s[0:1], s[8:9]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GFX6-NEXT: s_cbranch_execnz .LBB29_6
; GFX6-NEXT: .LBB29_7: ; %Flow19
@@ -10084,12 +10110,12 @@ define void @local_atomic_fadd_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX7-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB31_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -10105,12 +10131,12 @@ define void @local_atomic_fadd_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX6-NEXT: .LBB31_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, 4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, 4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB31_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
index d6b7d8f..8e094a7 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmax.ll
@@ -1598,29 +1598,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, 4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -1641,29 +1641,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -1688,16 +1688,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX942-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX942-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB10_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1710,29 +1710,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, 4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1747,29 +1747,29 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1784,24 +1784,24 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX10-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -1823,16 +1823,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX90A-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX90A-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1853,16 +1853,16 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX908-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX908-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1884,17 +1884,17 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1915,18 +1915,18 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1947,18 +1947,18 @@ define void @local_atomic_fmax_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB10_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1989,20 +1989,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2033,21 +2034,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2073,16 +2074,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX942-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX942-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB11_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2105,20 +2106,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2143,21 +2145,21 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2180,17 +2182,17 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX10-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -2213,16 +2215,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX90A-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX90A-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2244,16 +2246,16 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX908-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX908-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2276,17 +2278,17 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_max_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_max_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2308,18 +2310,18 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2341,18 +2343,18 @@ define void @local_atomic_fmax_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2685,17 +2687,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v1.l, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, 4.0, v2.l
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, 4.0, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2718,18 +2721,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v1, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, v2, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2749,14 +2752,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX942-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX942-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB13_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2771,17 +2774,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v1.l, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, 4.0, v2.l
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, 4.0, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2798,18 +2802,18 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2826,16 +2830,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX10-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX10-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -2852,14 +2856,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX90A-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX90A-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2875,14 +2879,14 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX908-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX908-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB13_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2898,15 +2902,15 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_max_f16_e32 v2, 4.0, v2
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_max_f16_e32 v1, 4.0, v1
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB13_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2922,16 +2926,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB13_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2948,16 +2952,16 @@ define void @local_atomic_fmax_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB13_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
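
Every hunk in this section applies the same mechanical rewrite to the generated compare-and-swap loops: the old output copied the ds_cmpst/ds_cmpstore result back into the loop-carried register at the bottom of the loop (the v_mov_b32_e32 v3, v4 after the compare), while the new output copies the loop-carried value into a scratch register at the top (v_mov_b32_e32 v4, v3) and lets the atomic's result land directly in the loop-carried register, swapping the operand order of the v_cmp_eq_u32 and the atomic to match. A minimal C sketch of the loop shape, assuming a 16-bit integer max as a stand-in for the fmax(x, 4.0) on the f16 field that the tests actually exercise:

    #include <stdint.h>

    /* Hedged sketch of the subword cmpxchg loop these checks describe;
     * an integer max stands in for fmax on the f16 field.  The implicit
     * "expected = old value" copy at the top of each retry corresponds
     * to the v_mov_b32_e32 v4, v3 the new output emits at the loop head. */
    static void atomic_max16_in_word(uint32_t *word, unsigned shift) {
        uint32_t field_mask = ~(0xffffu << shift);   /* v_not_b32 of the field mask */
        uint32_t expected = __atomic_load_n(word, __ATOMIC_RELAXED); /* ds_load_b32 */
        uint32_t updated;
        do {
            uint16_t h = (uint16_t)(expected >> shift);  /* v_lshrrev_b32 */
            if (h < 4)
                h = 4;                                   /* stands in for fmax 4.0 */
            updated = (expected & field_mask)
                      | ((uint32_t)h << shift);          /* v_and_or_b32 */
            /* ds_cmpst_rtn_b32: on failure, 'expected' is refreshed with
             * the current memory value and the loop retries. */
        } while (!__atomic_compare_exchange_n(word, &expected, updated,
                                              0, __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
    }
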
@@ -3911,38 +3915,38 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3963,37 +3967,37 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4019,22 +4023,22 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB16_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4047,38 +4051,38 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4093,37 +4097,37 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4138,28 +4142,28 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -4182,20 +4186,20 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4217,20 +4221,20 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB16_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4252,22 +4256,22 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB16_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4288,19 +4292,19 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4321,19 +4325,19 @@ define void @local_atomic_fmax_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
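
The bf16 loops add a rounding step the f16 ones do not need: after the f32 v_max, the value is converted back to bf16 with round-to-nearest-even and NaN quieting, which is what the recurring v_bfe_u32 / v_add3_u32 ... 0x7fff / v_or_b32 0x400000 / v_cmp_u_f32 / v_cndmask sequence in these hunks implements. A sketch of that conversion, assuming the usual truncate-the-high-half bf16 representation:

    #include <stdint.h>
    #include <string.h>

    /* Round-to-nearest-even float->bf16, matching the instruction
     * sequence in the bf16 hunks: bias by 0x7fff plus the LSB of the
     * kept half (v_bfe_u32 + v_add3_u32), and select a quieted NaN
     * (v_or_b32 0x400000) when the compare is unordered (v_cmp_u_f32). */
    static uint16_t float_to_bf16_rne(float f) {
        uint32_t bits;
        memcpy(&bits, &f, sizeof bits);
        uint32_t lsb = (bits >> 16) & 1;         /* v_bfe_u32 v5, v3, 16, 1 */
        uint32_t rounded = bits + 0x7fff + lsb;  /* v_add3_u32 v5, v5, v3, 0x7fff */
        if (f != f)                              /* unordered: v_cmp_u_f32 v3, v3 */
            rounded = bits | 0x400000;           /* quiet-NaN bit, v_or_b32 */
        return (uint16_t)(rounded >> 16);        /* keep the high half */
    }
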
@@ -4364,29 +4368,30 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4417,28 +4422,29 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4465,22 +4471,22 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB17_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4504,28 +4510,29 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4551,27 +4558,28 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4594,21 +4602,21 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -4632,20 +4640,20 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4668,20 +4676,20 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB17_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4704,22 +4712,22 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB17_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4741,19 +4749,19 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4775,19 +4783,19 @@ define void @local_atomic_fmax_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_max_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_max_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5189,26 +5197,27 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v2, 4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5231,25 +5240,26 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5270,21 +5280,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB19_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5300,25 +5310,26 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5336,24 +5347,25 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5370,21 +5382,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -5402,20 +5414,20 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5432,20 +5444,20 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB19_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5461,21 +5473,21 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB19_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5491,17 +5503,17 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB19_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5518,17 +5530,17 @@ define void @local_atomic_fmax_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_max_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX6-NEXT: v_max_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB19_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6101,15 +6113,15 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v1
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6129,14 +6141,14 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB22_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6152,15 +6164,15 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v2, v2, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,14 +6190,14 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_max_f16 v2, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6202,13 +6214,13 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6224,13 +6236,13 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB22_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6248,16 +6260,16 @@ define void @local_atomic_fmax_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_max_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_max_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB22_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6363,15 +6375,15 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_max_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_max_num_f16 v2, v2, v1
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6391,14 +6403,14 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB23_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6414,15 +6426,15 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v2, v2, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6440,14 +6452,14 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_max_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_max_f16 v2, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -6464,13 +6476,13 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6486,13 +6498,13 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_max_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_max_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB23_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6510,16 +6522,16 @@ define void @local_atomic_fmax_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_max_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_max_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_max_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB23_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7589,31 +7601,34 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -7638,32 +7653,33 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -7686,27 +7702,27 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_max_f32_e32 v3, v3, v2
; GFX942-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB26_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7724,30 +7740,32 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7768,30 +7786,32 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7811,27 +7831,27 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v2
; GFX10-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -7851,26 +7871,26 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_max_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7889,26 +7909,26 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_max_f32_e32 v3, v3, v2
; GFX908-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB26_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7926,29 +7946,29 @@ define void @local_atomic_fmax_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v2
; GFX8-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB26_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8047,31 +8067,34 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_max_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_max_num_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8096,32 +8119,33 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_max_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_max_num_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8144,27 +8168,27 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_max_f32_e32 v3, v3, v2
; GFX942-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB27_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8182,30 +8206,32 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_max_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_max_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_max_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8226,30 +8252,32 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_max_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_max_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -8269,27 +8297,27 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_max_f32_e32 v3, v3, v2
; GFX10-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -8309,26 +8337,26 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_max_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8347,26 +8375,26 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_max_f32_e32 v3, v3, v2
; GFX908-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB27_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8384,29 +8412,29 @@ define void @local_atomic_fmax_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_max_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_max_f32_e32 v3, v3, v2
; GFX8-NEXT: v_max_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB27_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
index 11ed43d..0aa8d33 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fmin.ll
@@ -1598,29 +1598,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v4.l, 4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -1641,29 +1641,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -1688,16 +1688,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX942-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX942-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB10_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1710,29 +1710,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f16_e32 v4.l, 4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1747,29 +1747,29 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1784,24 +1784,24 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX10-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -1823,16 +1823,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX90A-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX90A-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1853,16 +1853,16 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX908-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX908-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1884,17 +1884,17 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1915,18 +1915,18 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1947,18 +1947,18 @@ define void @local_atomic_fmin_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB10_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1989,20 +1989,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v4.l, v4.l, v4.l
-; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v4.l, 4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v3.l, v3.l, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v3.l, 4.0, v3.l
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2033,21 +2034,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v4, v4, v4
-; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v4, 4.0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v3, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2073,16 +2074,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX942-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX942-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB11_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2105,20 +2106,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v4.l, v4.l, v4.l
-; GFX11-TRUE16-NEXT: v_min_f16_e32 v4.l, 4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v3.l, v3.l, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v3.l, 4.0, v3.l
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2143,21 +2145,21 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX11-FAKE16-NEXT: v_min_f16_e32 v4, 4.0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v3, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2180,17 +2182,17 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX10-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX10-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -2213,16 +2215,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX90A-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX90A-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2244,16 +2246,16 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX908-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX908-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2276,17 +2278,17 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_max_f16_e32 v4, v4, v4
-; GFX8-NEXT: v_min_f16_e32 v4, 4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_max_f16_e32 v3, v3, v3
+; GFX8-NEXT: v_min_f16_e32 v3, 4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2308,18 +2310,18 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2341,18 +2343,18 @@ define void @local_atomic_fmin_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2685,17 +2687,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v2.l, v1.l, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v2.l, 4.0, v2.l
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_max_num_f16_e32 v1.l, v2.l, v2.l
+; GFX12-TRUE16-NEXT: v_min_num_f16_e32 v1.l, 4.0, v1.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2718,18 +2721,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v2, v1, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_max_num_f16_e32 v1, v2, v2
+; GFX12-FAKE16-NEXT: v_min_num_f16_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2749,14 +2752,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX942-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX942-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB13_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2771,17 +2774,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_max_f16_e32 v2.l, v1.l, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f16_e32 v2.l, 4.0, v2.l
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v1.l, v2.l, v2.l
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v1.l, 4.0, v1.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2798,18 +2802,18 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_max_f16_e32 v2, v1, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2826,16 +2830,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX10-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX10-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -2852,14 +2856,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX90A-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX90A-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2875,14 +2879,14 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX908-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX908-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB13_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2898,15 +2902,15 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_e32 v2, v1, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_min_f16_e32 v2, 4.0, v2
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_max_f16_e32 v1, v2, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_min_f16_e32 v1, 4.0, v1
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB13_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2922,16 +2926,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB13_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2948,16 +2952,16 @@ define void @local_atomic_fmin_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB13_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3911,38 +3915,38 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3963,37 +3967,37 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4019,22 +4023,22 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB16_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4047,38 +4051,38 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4093,37 +4097,37 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4138,28 +4142,28 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -4182,20 +4186,20 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4217,20 +4221,20 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB16_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4252,22 +4256,22 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB16_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4288,19 +4292,19 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4321,19 +4325,19 @@ define void @local_atomic_fmin_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4364,29 +4368,30 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4417,28 +4422,29 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, 4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, 4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4465,22 +4471,22 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB17_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4504,28 +4510,29 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4551,27 +4558,28 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4594,21 +4602,21 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -4632,20 +4640,20 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4668,20 +4676,20 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB17_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4704,22 +4712,22 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB17_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4741,19 +4749,19 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX7-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4775,19 +4783,19 @@ define void @local_atomic_fmin_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_mul_f32_e32 v4, 1.0, v4
-; GFX6-NEXT: v_min_f32_e32 v4, 4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX6-NEXT: v_min_f32_e32 v3, 4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5189,26 +5197,27 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v2, 4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v1, 4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5231,25 +5240,26 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v2, 4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v1, 4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5270,21 +5280,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB19_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5300,25 +5310,26 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5336,24 +5347,25 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5370,21 +5382,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -5402,20 +5414,20 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5432,20 +5444,20 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB19_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5461,21 +5473,21 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB19_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5491,17 +5503,17 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX7-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB19_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5518,17 +5530,17 @@ define void @local_atomic_fmin_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_mul_f32_e32 v2, 1.0, v2
-; GFX6-NEXT: v_min_f32_e32 v2, 4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX6-NEXT: v_min_f32_e32 v1, 4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB19_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6101,15 +6113,15 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_min_num_f16 v2, v2, v1
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6129,14 +6141,14 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB22_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6152,15 +6164,15 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_min_f16 v2, v2, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,14 +6190,14 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_min_f16 v2, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6202,13 +6214,13 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6224,13 +6236,13 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB22_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6248,16 +6260,16 @@ define void @local_atomic_fmin_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_min_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_min_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_min_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB22_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6363,15 +6375,15 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_max_num_f16 v3, v2, v2
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-NEXT: v_pk_min_num_f16 v3, v3, v1
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-NEXT: v_pk_max_num_f16 v2, v3, v3
+; GFX12-NEXT: v_pk_min_num_f16 v2, v2, v1
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6391,14 +6403,14 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_max_f16 v3, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_max_f16 v2, v3, v3
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB23_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6414,15 +6426,15 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX11-NEXT: v_pk_min_f16 v2, v2, v1
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6440,14 +6452,14 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX10-NEXT: v_pk_min_f16 v3, v3, v1
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX10-NEXT: v_pk_min_f16 v2, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -6464,13 +6476,13 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX90A-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX90A-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6486,13 +6498,13 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_max_f16 v3, v2, v2
-; GFX908-NEXT: v_pk_min_f16 v3, v3, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_max_f16 v2, v3, v3
+; GFX908-NEXT: v_pk_min_f16 v2, v2, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB23_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6510,16 +6522,16 @@ define void @local_atomic_fmin_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_max_f16_sdwa v4, v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_max_f16_e32 v5, v3, v3
-; GFX8-NEXT: v_min_f16_sdwa v4, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_max_f16_sdwa v3, v4, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_max_f16_e32 v5, v4, v4
+; GFX8-NEXT: v_min_f16_sdwa v3, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT: v_min_f16_e32 v5, v5, v1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB23_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7589,31 +7601,34 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -7638,32 +7653,33 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -7686,27 +7702,27 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_min_f32_e32 v3, v3, v2
; GFX942-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB26_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7724,30 +7740,32 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7768,30 +7786,32 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -7811,27 +7831,27 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_min_f32_e32 v3, v3, v2
; GFX10-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -7851,26 +7871,26 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_min_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7889,26 +7909,26 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_min_f32_e32 v3, v3, v2
; GFX908-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB26_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7926,29 +7946,29 @@ define void @local_atomic_fmin_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v2
; GFX8-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB26_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8047,31 +8067,34 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_min_num_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_min_num_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8096,32 +8119,33 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_min_num_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_min_num_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8144,27 +8168,27 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_min_f32_e32 v3, v3, v2
; GFX942-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB27_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8182,30 +8206,32 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_min_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_min_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_min_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8226,30 +8252,32 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_min_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_min_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -8269,27 +8297,27 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_min_f32_e32 v3, v3, v2
; GFX10-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -8309,26 +8337,26 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_min_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8347,26 +8375,26 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_min_f32_e32 v3, v3, v2
; GFX908-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB27_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8384,29 +8412,29 @@ define void @local_atomic_fmin_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_min_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_min_f32_e32 v3, v3, v2
; GFX8-NEXT: v_min_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB27_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
index d74338c..929bb61 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fsub.ll
@@ -453,13 +453,14 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX12-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -478,12 +479,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB2_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -498,13 +499,14 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX11-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -521,13 +523,13 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB2_1
@@ -543,12 +545,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB2_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -563,12 +565,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB2_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -584,12 +586,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB2_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -605,12 +607,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB2_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -626,12 +628,12 @@ define void @local_atomic_fsub_noret_f32(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB2_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB2_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -654,13 +656,14 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65532
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -679,12 +682,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB3_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -699,13 +702,14 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -722,13 +726,13 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB3_1
@@ -744,12 +748,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB3_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -764,12 +768,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB3_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -785,12 +789,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB3_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -806,12 +810,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65532
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65532
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB3_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -828,12 +832,12 @@ define void @local_atomic_fsub_noret_f32__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB3_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB3_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1296,13 +1300,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX12-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], -4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], -4.0, v[3:4]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -1321,12 +1326,12 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX942-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5]
+; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX942-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX942-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3]
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b64_e32 v[2:3], v[4:5]
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB6_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1341,13 +1346,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2]
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4]
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1364,14 +1370,14 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB6_1
@@ -1387,12 +1393,12 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5]
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3]
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB6_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1407,13 +1413,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB6_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1429,13 +1435,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB6_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1451,13 +1457,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB6_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1473,13 +1479,13 @@ define void @local_atomic_fsub_noret_f64(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB6_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v2
+; GFX6-NEXT: v_mov_b32_e32 v3, v1
+; GFX6-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX6-NEXT: v_mov_b32_e32 v1, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v2, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB6_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1502,13 +1508,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f64_e32 v[3:4], -4.0, v[1:2]
+; GFX12-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f64_e32 v[1:2], -4.0, v[3:4]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX12-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX12-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -1527,12 +1534,12 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX942-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5] offset:65528
+; GFX942-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
+; GFX942-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX942-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3] offset:65528
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX942-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b64_e32 v[2:3], v[4:5]
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB7_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1547,13 +1554,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX11-NEXT: v_dual_mov_b32 v4, v2 :: v_dual_mov_b32 v3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b64 v[3:4], v0, v[3:4], v[1:2] offset:65528
+; GFX11-NEXT: ds_cmpstore_rtn_b64 v[1:2], v0, v[1:2], v[3:4] offset:65528
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX11-NEXT: v_dual_mov_b32 v1, v3 :: v_dual_mov_b32 v2, v4
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -1570,14 +1578,14 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
+; GFX10-NEXT: v_mov_b32_e32 v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v3, v1
+; GFX10-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX10-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[3:4], v[1:2]
-; GFX10-NEXT: v_mov_b32_e32 v1, v3
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[1:2], v[3:4]
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB7_1
@@ -1593,12 +1601,12 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f64 v[4:5], v[2:3], -4.0
-; GFX90A-NEXT: ds_cmpst_rtn_b64 v[4:5], v0, v[2:3], v[4:5] offset:65528
+; GFX90A-NEXT: v_pk_mov_b32 v[4:5], v[2:3], v[2:3] op_sel:[0,1]
+; GFX90A-NEXT: v_add_f64 v[2:3], v[4:5], -4.0
+; GFX90A-NEXT: ds_cmpst_rtn_b64 v[2:3], v0, v[4:5], v[2:3] offset:65528
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_pk_mov_b32 v[2:3], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB7_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1613,13 +1621,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX908-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX908-NEXT: v_mov_b32_e32 v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX908-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX908-NEXT: v_mov_b32_e32 v1, v3
+; GFX908-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB7_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1635,13 +1643,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX8-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX8-NEXT: v_mov_b32_e32 v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v3, v1
+; GFX8-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX8-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX8-NEXT: v_mov_b32_e32 v1, v3
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB7_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1657,13 +1665,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f64 v[3:4], v[1:2], -4.0
-; GFX7-NEXT: ds_cmpst_rtn_b64 v[3:4], v0, v[1:2], v[3:4] offset:65528
+; GFX7-NEXT: v_mov_b32_e32 v4, v2
+; GFX7-NEXT: v_mov_b32_e32 v3, v1
+; GFX7-NEXT: v_add_f64 v[1:2], v[3:4], -4.0
+; GFX7-NEXT: ds_cmpst_rtn_b64 v[1:2], v0, v[3:4], v[1:2] offset:65528
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[1:2]
-; GFX7-NEXT: v_mov_b32_e32 v1, v3
+; GFX7-NEXT: v_cmp_eq_u64_e32 vcc, v[1:2], v[3:4]
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v2, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB7_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -1680,13 +1688,13 @@ define void @local_atomic_fsub_noret_f64__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f64 v[3:4], v[0:1], -4.0
-; GFX6-NEXT: ds_cmpst_rtn_b64 v[3:4], v2, v[0:1], v[3:4]
+; GFX6-NEXT: v_mov_b32_e32 v4, v1
+; GFX6-NEXT: v_mov_b32_e32 v3, v0
+; GFX6-NEXT: v_add_f64 v[0:1], v[3:4], -4.0
+; GFX6-NEXT: ds_cmpst_rtn_b64 v[0:1], v2, v[3:4], v[0:1]
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[0:1]
-; GFX6-NEXT: v_mov_b32_e32 v0, v3
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[3:4]
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB7_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2494,27 +2502,27 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2535,28 +2543,28 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2581,15 +2589,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX942-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX942-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB10_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2602,27 +2610,27 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2637,28 +2645,28 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -2673,23 +2681,23 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX10-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX10-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB10_1
@@ -2711,15 +2719,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB10_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2740,15 +2748,15 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX908-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX908-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB10_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2770,16 +2778,16 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX8-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX8-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB10_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2800,18 +2808,18 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB10_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2832,18 +2840,18 @@ define void @local_atomic_fsub_noret_f16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB10_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2874,19 +2882,19 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -2917,19 +2925,20 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX12-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -2955,15 +2964,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX942-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX942-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX942-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB11_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -2986,19 +2995,19 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-TRUE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v4.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v4.l, -4.0, v4.l
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v3.l, -4.0, v3.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3023,19 +3032,20 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX11-FAKE16-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v3, -4.0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3058,16 +3068,16 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX10-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX10-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB11_1
@@ -3090,15 +3100,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX90A-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX90A-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX90A-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB11_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3120,15 +3130,15 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX908-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX908-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX908-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB11_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3151,16 +3161,16 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX8-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX8-NEXT: v_add_f16_e32 v4, -4.0, v4
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX8-NEXT: v_add_f16_e32 v3, -4.0, v3
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB11_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3182,18 +3192,18 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX7-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB11_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3215,18 +3225,18 @@ define void @local_atomic_fsub_noret_f16__offset(ptr addrspace(3) %ptr) nounwind
; GFX6-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_cvt_f32_f16_e32 v4, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_cvt_f16_f32_e32 v4, v4
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_cvt_f32_f16_e32 v3, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_cvt_f16_f32_e32 v3, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB11_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3548,16 +3558,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_add_f16_e32 v2.l, -4.0, v1.l
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f16_e32 v1.l, -4.0, v2.l
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -3580,16 +3590,17 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -3609,13 +3620,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX942-NEXT: v_and_or_b32 v2, v1, s2, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX942-NEXT: v_and_or_b32 v1, v2, s2, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB13_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3630,16 +3641,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_add_f16_e32 v2.l, -4.0, v1.l
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v2.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v1.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add_f16_e32 v1.l, -4.0, v2.l
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3656,16 +3667,17 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_add_f16_e32 v2, -4.0, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -3682,15 +3694,15 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB13_1
@@ -3707,13 +3719,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB13_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3729,13 +3741,13 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX908-NEXT: v_and_or_b32 v2, v1, s6, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX908-NEXT: v_and_or_b32 v1, v2, s6, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB13_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3751,14 +3763,14 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f16_e32 v2, -4.0, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f16_e32 v1, -4.0, v2
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB13_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3774,16 +3786,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX7-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX7-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB13_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -3800,16 +3812,16 @@ define void @local_atomic_fsub_noret_f16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB13_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cvt_f32_f16_e32 v2, v1
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX6-NEXT: v_cvt_f16_f32_e32 v2, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_cvt_f32_f16_e32 v1, v2
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX6-NEXT: v_cvt_f16_f32_e32 v1, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB13_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4759,38 +4771,38 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX12-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -4811,37 +4823,37 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX12-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX12-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX12-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX12-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX12-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX12-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -4867,22 +4879,22 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX942-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB16_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -4895,38 +4907,38 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
-; GFX11-TRUE16-NEXT: ds_load_b32 v2, v1
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-TRUE16-NEXT: ds_load_b32 v3, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-TRUE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-TRUE16-NEXT: .p2align 6
; GFX11-TRUE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v0, v5
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v0, v5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4941,37 +4953,37 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, -4, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
-; GFX11-FAKE16-NEXT: ds_load_b32 v2, v1
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX11-FAKE16-NEXT: ds_load_b32 v3, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 24, v0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-FAKE16-NEXT: v_not_b32_e32 v3, v3
+; GFX11-FAKE16-NEXT: v_not_b32_e32 v2, v2
; GFX11-FAKE16-NEXT: .p2align 6
; GFX11-FAKE16-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v0, v2
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v0, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v0, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v1, v4, v2
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v1, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -4986,28 +4998,28 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX10-NEXT: v_and_b32_e32 v1, -4, v0
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_mov_b32 s4, 0
-; GFX10-NEXT: ds_read_b32 v2, v1
-; GFX10-NEXT: v_lshlrev_b32_e64 v3, v0, 0xffff
+; GFX10-NEXT: ds_read_b32 v3, v1
+; GFX10-NEXT: v_lshlrev_b32_e64 v2, v0, 0xffff
; GFX10-NEXT: v_and_b32_e32 v0, 24, v0
-; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v2, v3, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v1, v2, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB16_1
@@ -5030,20 +5042,20 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX90A-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB16_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5065,20 +5077,20 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX908-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB16_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5100,22 +5112,22 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX8-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v0, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB16_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5136,18 +5148,18 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX7-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB16_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5168,18 +5180,18 @@ define void @local_atomic_fsub_noret_bf16(ptr addrspace(3) %ptr) nounwind {
; GFX6-NEXT: .LBB16_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v0, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v0, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v1, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v0, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v1, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB16_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5210,29 +5222,30 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX12-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -5263,28 +5276,29 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX12-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX12-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX12-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -5311,22 +5325,22 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX942-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX942-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX942-NEXT: v_add3_u32 v5, v5, v4, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX942-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX942-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX942-NEXT: v_add3_u32 v5, v5, v3, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX942-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX942-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX942-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX942-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB17_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5350,28 +5364,29 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-TRUE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v4, v1, v5
-; GFX11-TRUE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-TRUE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.h, 0
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, v1, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5397,27 +5412,28 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX11-FAKE16-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, v1, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX11-FAKE16-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX11-FAKE16-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -5440,21 +5456,21 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX10-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX10-NEXT: v_add3_u32 v5, v5, v4, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX10-NEXT: v_and_or_b32 v4, v3, v2, v4
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX10-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_add3_u32 v5, v5, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX10-NEXT: v_and_or_b32 v3, v4, v2, v3
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB17_1
@@ -5478,20 +5494,20 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX90A-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX90A-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX90A-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX90A-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX90A-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX90A-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX90A-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX90A-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX90A-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX90A-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX90A-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB17_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5514,20 +5530,20 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX908-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX908-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX908-NEXT: v_bfe_u32 v5, v4, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v4
-; GFX908-NEXT: v_add3_u32 v5, v5, v4, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX908-NEXT: v_cndmask_b32_e32 v4, v5, v6, vcc
-; GFX908-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX908-NEXT: v_and_or_b32 v4, v3, v2, v4
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX908-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX908-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v6, 0x400000, v3
+; GFX908-NEXT: v_add3_u32 v5, v5, v3, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX908-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
+; GFX908-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX908-NEXT: v_and_or_b32 v3, v4, v2, v3
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB17_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5550,22 +5566,22 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX8-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshrrev_b32_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX8-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v1, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v6, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX8-NEXT: v_lshlrev_b32_sdwa v4, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v6, v7, vcc
+; GFX8-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB17_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5587,18 +5603,18 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX7-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX7-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX7-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX7-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX7-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX7-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX7-NEXT: v_mov_b32_e32 v4, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX7-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX7-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX7-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX7-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX7-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v3, v4
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB17_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -5620,18 +5636,18 @@ define void @local_atomic_fsub_noret_bf16__offset(ptr addrspace(3) %ptr) nounwin
; GFX6-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, v1, v3
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_add_f32_e32 v4, -4.0, v4
-; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; GFX6-NEXT: v_and_b32_e32 v5, v3, v2
-; GFX6-NEXT: v_lshlrev_b32_e32 v4, v1, v4
-; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
-; GFX6-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX6-NEXT: v_mov_b32_e32 v4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_f32_e32 v3, -4.0, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v5, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
+; GFX6-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v3, v4
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB17_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6031,26 +6047,27 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-TRUE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX12-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -6073,25 +6090,26 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX12-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX12-FAKE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX12-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX12-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX12-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX12-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -6112,21 +6130,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX942-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX942-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX942-NEXT: v_add3_u32 v3, v3, v2, s2
-; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX942-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX942-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX942-NEXT: v_add3_u32 v3, v3, v1, s2
+; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX942-NEXT: s_nop 1
-; GFX942-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX942-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX942-NEXT: v_and_or_b32 v2, v1, s3, v2
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX942-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX942-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX942-NEXT: v_and_or_b32 v1, v2, s3, v1
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB19_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6142,25 +6160,26 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-TRUE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-TRUE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-TRUE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.h, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v2.h
-; GFX11-TRUE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v3
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v3.l, v1.h
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v3
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6178,24 +6197,25 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX11-FAKE16-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, v1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX11-FAKE16-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11-FAKE16-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-FAKE16-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1 offset:65534
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:65534
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-FAKE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6212,21 +6232,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc_lo
-; GFX10-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX10-NEXT: v_and_or_b32 v2, 0xffff0000, v1, v2
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX10-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX10-NEXT: v_add3_u32 v3, v3, v1, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc_lo
+; GFX10-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX10-NEXT: v_and_or_b32 v1, 0xffff0000, v2, v1
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB19_1
@@ -6244,20 +6264,20 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX90A-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX90A-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX90A-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX90A-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX90A-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX90A-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX90A-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX90A-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX90A-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX90A-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX90A-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX90A-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB19_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6274,20 +6294,20 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX908-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX908-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v2
-; GFX908-NEXT: v_add3_u32 v3, v3, v2, s6
-; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX908-NEXT: v_cndmask_b32_e32 v2, v3, v4, vcc
-; GFX908-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX908-NEXT: v_and_or_b32 v2, v1, s7, v2
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX908-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX908-NEXT: v_or_b32_e32 v4, 0x400000, v1
+; GFX908-NEXT: v_add3_u32 v3, v3, v1, s6
+; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX908-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX908-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX908-NEXT: v_and_or_b32 v1, v2, s7, v1
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB19_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6303,21 +6323,21 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX8-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX8-NEXT: v_bfe_u32 v4, v2, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX8-NEXT: v_bfe_u32 v4, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v1
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc
-; GFX8-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v5, vcc
+; GFX8-NEXT: v_or_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB19_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6333,16 +6353,16 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX7-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX7-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX7-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2 offset:65534
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX7-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX7-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1 offset:65534
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB19_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6359,16 +6379,16 @@ define void @local_atomic_fsub_noret_bf16__offset__align4(ptr addrspace(3) %ptr)
; GFX6-NEXT: .LBB19_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v1
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v2
-; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v2
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v1
+; GFX6-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB19_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6906,13 +6926,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX12-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -6931,12 +6952,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX942-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB22_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -6951,13 +6972,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -6974,13 +6996,13 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB22_1
@@ -6996,12 +7018,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX90A-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB22_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7016,12 +7038,12 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX908-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB22_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7037,14 +7059,14 @@ define void @local_atomic_fsub_noret_v2f16(ptr addrspace(3) %ptr, <2 x half> %va
; GFX8-NEXT: .LBB22_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB22_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7149,13 +7171,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX12-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX12-NEXT: v_mov_b32_e32 v3, v2
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX12-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -7174,12 +7197,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX942-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX942-NEXT: v_mov_b32_e32 v3, v2
+; GFX942-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v2, v3
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB23_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7194,13 +7217,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX11-NEXT: v_mov_b32_e32 v3, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v2 offset:65532
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v3 offset:65532
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX11-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -7217,13 +7241,13 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX10-NEXT: v_mov_b32_e32 v3, v2
+; GFX10-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v2
-; GFX10-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v3
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB23_1
@@ -7239,12 +7263,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX90A-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX90A-NEXT: v_mov_b32_e32 v3, v2
+; GFX90A-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v2, v3
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB23_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7259,12 +7283,12 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX908-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_pk_add_f16 v3, v2, v1 neg_lo:[0,1] neg_hi:[0,1]
-; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX908-NEXT: v_mov_b32_e32 v3, v2
+; GFX908-NEXT: v_pk_add_f16 v2, v3, v1 neg_lo:[0,1] neg_hi:[0,1]
+; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v2, v3
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB23_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -7280,14 +7304,14 @@ define void @local_atomic_fsub_noret_v2f16__offset(ptr addrspace(3) %ptr, <2 x h
; GFX8-NEXT: .LBB23_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_sub_f16_sdwa v3, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX8-NEXT: v_sub_f16_e32 v4, v2, v1
-; GFX8-NEXT: v_or_b32_e32 v3, v4, v3
-; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v2, v3 offset:65532
+; GFX8-NEXT: v_mov_b32_e32 v3, v2
+; GFX8-NEXT: v_sub_f16_sdwa v2, v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_sub_f16_e32 v4, v3, v1
+; GFX8-NEXT: v_or_b32_e32 v2, v4, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v3, v2 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v2
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v3
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v2, v3
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB23_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8357,31 +8381,34 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8406,32 +8433,33 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX12-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8454,27 +8482,27 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX942-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX942-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB26_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8492,30 +8520,32 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-TRUE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8536,30 +8566,32 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX11-FAKE16-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -8579,27 +8611,27 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX10-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB26_1
@@ -8619,26 +8651,26 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX90A-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB26_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8657,26 +8689,26 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX908-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX908-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB26_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8694,29 +8726,29 @@ define void @local_atomic_fsub_noret_v2bf16(ptr addrspace(3) %ptr, <2 x bfloat>
; GFX8-NEXT: .LBB26_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX8-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB26_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8815,31 +8847,34 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
-; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX12-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX12-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX12-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
; GFX12-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX12-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX12-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX12-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
-; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX12-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffd
; GFX12-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX12-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX12-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX12-TRUE16-NEXT: s_wait_storecnt 0x0
-; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX12-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX12-TRUE16-NEXT: s_wait_dscnt 0x0
; GFX12-TRUE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
; GFX12-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-TRUE16-NEXT: s_wait_alu 0xfffe
@@ -8864,32 +8899,33 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX12-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
-; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX12-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX12-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX12-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX12-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX12-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX12-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
+; GFX12-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX12-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX12-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffd
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX12-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
+; GFX12-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX12-FAKE16-NEXT: s_wait_alu 0xf1ff
-; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX12-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX12-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX12-FAKE16-NEXT: s_wait_storecnt 0x0
-; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX12-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX12-FAKE16-NEXT: s_wait_dscnt 0x0
; GFX12-FAKE16-NEXT: global_inv scope:SCOPE_SE
-; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX12-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX12-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
; GFX12-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX12-FAKE16-NEXT: s_wait_alu 0xfffe
@@ -8912,27 +8948,27 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX942-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX942-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX942-NEXT: v_mov_b32_e32 v4, v3
+; GFX942-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX942-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX942-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX942-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX942-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX942-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX942-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX942-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX942-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX942-NEXT: v_add3_u32 v6, v6, v4, s4
+; GFX942-NEXT: v_add3_u32 v6, v6, v3, s4
; GFX942-NEXT: v_add3_u32 v8, v8, v5, s4
; GFX942-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v4, v4
+; GFX942-NEXT: v_cmp_u_f32_e64 s[0:1], v3, v3
; GFX942-NEXT: s_nop 0
; GFX942-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX942-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[0:1]
-; GFX942-NEXT: v_perm_b32 v4, v5, v4, s5
-; GFX942-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX942-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[0:1]
+; GFX942-NEXT: v_perm_b32 v3, v5, v3, s5
+; GFX942-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX942-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
-; GFX942-NEXT: v_mov_b32_e32 v3, v4
; GFX942-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX942-NEXT: s_cbranch_execnz .LBB27_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -8950,30 +8986,32 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-TRUE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-TRUE16-NEXT: v_dual_sub_f32 v5, v5, v2 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-TRUE16-NEXT: v_sub_f32_e32 v4, v4, v1
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v5, v5, v2
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
-; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11-TRUE16-NEXT: v_sub_f32_e32 v3, v3, v1
; GFX11-TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v4, v6, v8, vcc_lo
+; GFX11-TRUE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
; GFX11-TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-TRUE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v4.h
+; GFX11-TRUE16-NEXT: v_mov_b16_e32 v5.l, v3.h
; GFX11-TRUE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v5, v3 offset:65532
+; GFX11-TRUE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v5, v4 offset:65532
; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-TRUE16-NEXT: buffer_gl0_inv
-; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-TRUE16-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -8994,30 +9032,32 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX11-FAKE16-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v4, v3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_dual_sub_f32 v5, v5, v1 :: v_dual_lshlrev_b32 v4, 16, v3
-; GFX11-FAKE16-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v5, v5, v1
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v4, 16, 1
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11-FAKE16-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX11-FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
-; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v4, v4
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_bfe_u32 v6, v3, 16, 1
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
+; GFX11-FAKE16-NEXT: v_cmp_u_f32_e64 s0, v3, v3
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v4, v6, v8, s0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-FAKE16-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX11-FAKE16-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v3, v6, v8, s0
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX11-FAKE16-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v4, v0, v4, v3 offset:65532
+; GFX11-FAKE16-NEXT: ds_cmpstore_rtn_b32 v3, v0, v3, v4 offset:65532
; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-FAKE16-NEXT: buffer_gl0_inv
-; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, v4
+; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX11-FAKE16-NEXT: s_or_b32 s1, vcc_lo, s1
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s1
@@ -9037,27 +9077,27 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX10-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX10-NEXT: v_mov_b32_e32 v4, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX10-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX10-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX10-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX10-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX10-NEXT: v_bfe_u32 v7, v5, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v3
; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX10-NEXT: v_add3_u32 v6, v6, v4, 0x7fff
+; GFX10-NEXT: v_add3_u32 v6, v6, v3, 0x7fff
; GFX10-NEXT: v_add3_u32 v7, v7, v5, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e64 s4, v4, v4
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v3, v3
; GFX10-NEXT: v_cndmask_b32_e32 v5, v7, v9, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v4, v6, v8, s4
-; GFX10-NEXT: v_perm_b32 v4, v5, v4, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v6, v8, s4
+; GFX10-NEXT: v_perm_b32 v3, v5, v3, 0x7060302
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX10-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v4, v3
-; GFX10-NEXT: v_mov_b32_e32 v3, v4
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v3, v4
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: s_cbranch_execnz .LBB27_1
@@ -9077,26 +9117,26 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX90A-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX90A-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX90A-NEXT: v_mov_b32_e32 v4, v3
+; GFX90A-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX90A-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX90A-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX90A-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX90A-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX90A-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX90A-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX90A-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX90A-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX90A-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX90A-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX90A-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX90A-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX90A-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX90A-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX90A-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX90A-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX90A-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX90A-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX90A-NEXT: v_mov_b32_e32 v3, v4
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX90A-NEXT: s_cbranch_execnz .LBB27_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9115,26 +9155,26 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX908-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX908-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX908-NEXT: v_mov_b32_e32 v4, v3
+; GFX908-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX908-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX908-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX908-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX908-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX908-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX908-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX908-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX908-NEXT: v_or_b32_e32 v9, 0x400000, v5
-; GFX908-NEXT: v_add3_u32 v6, v6, v4, s8
+; GFX908-NEXT: v_add3_u32 v6, v6, v3, s8
; GFX908-NEXT: v_add3_u32 v8, v8, v5, s8
; GFX908-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
-; GFX908-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX908-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
+; GFX908-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX908-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX908-NEXT: v_perm_b32 v4, v5, v4, s9
-; GFX908-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX908-NEXT: v_perm_b32 v3, v5, v3, s9
+; GFX908-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX908-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX908-NEXT: v_mov_b32_e32 v3, v4
; GFX908-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX908-NEXT: s_cbranch_execnz .LBB27_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9152,29 +9192,29 @@ define void @local_atomic_fsub_noret_v2bf16__ofset(ptr addrspace(3) %ptr, <2 x b
; GFX8-NEXT: .LBB27_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v3
-; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v3
-; GFX8-NEXT: v_sub_f32_e32 v4, v4, v2
+; GFX8-NEXT: v_mov_b32_e32 v4, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX8-NEXT: v_sub_f32_e32 v3, v3, v2
; GFX8-NEXT: v_sub_f32_e32 v5, v5, v1
-; GFX8-NEXT: v_bfe_u32 v6, v4, 16, 1
+; GFX8-NEXT: v_bfe_u32 v6, v3, 16, 1
; GFX8-NEXT: v_bfe_u32 v8, v5, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v4
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v3
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v5
; GFX8-NEXT: v_add_u32_e32 v6, vcc, 0x7fff, v6
; GFX8-NEXT: v_add_u32_e32 v8, vcc, 0x7fff, v8
; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v5
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v4
-; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v4, v4
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e64 s[4:5], v3, v3
; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v9, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v4, v6, v7, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v6, v7, s[4:5]
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; GFX8-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; GFX8-NEXT: ds_cmpst_rtn_b32 v4, v0, v3, v4 offset:65532
+; GFX8-NEXT: v_alignbit_b32 v3, v5, v3, 16
+; GFX8-NEXT: ds_cmpst_rtn_b32 v3, v0, v4, v3 offset:65532
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v4, v3
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4
; GFX8-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
-; GFX8-NEXT: v_mov_b32_e32 v3, v4
; GFX8-NEXT: s_andn2_b64 exec, exec, s[6:7]
; GFX8-NEXT: s_cbranch_execnz .LBB27_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9489,13 +9529,14 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX12-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX12-NEXT: s_wait_dscnt 0x0
-; GFX12-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX12-NEXT: v_mov_b32_e32 v2, v1
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX12-NEXT: s_wait_storecnt 0x0
-; GFX12-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX12-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
; GFX12-NEXT: s_wait_dscnt 0x0
; GFX12-NEXT: global_inv scope:SCOPE_SE
-; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX12-NEXT: v_mov_b32_e32 v1, v2
+; GFX12-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX12-NEXT: s_wait_alu 0xfffe
@@ -9514,12 +9555,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX942-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX942-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX942-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX942-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX942-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
-; GFX942-NEXT: v_mov_b32_e32 v1, v2
; GFX942-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX942-NEXT: s_cbranch_execnz .LBB29_1
; GFX942-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9534,13 +9575,14 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX11-NEXT: v_mov_b32_e32 v2, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-NEXT: ds_cmpstore_rtn_b32 v2, v0, v2, v1
+; GFX11-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, v2
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX11-NEXT: s_or_b32 s0, vcc_lo, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
@@ -9557,13 +9599,13 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX10-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: v_add_f32_e32 v2, -4.0, v1
+; GFX10-NEXT: v_mov_b32_e32 v2, v1
+; GFX10-NEXT: v_add_f32_e32 v1, -4.0, v2
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX10-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v2, v1
-; GFX10-NEXT: v_mov_b32_e32 v1, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, v1, v2
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB29_1
@@ -9579,12 +9621,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX90A-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX90A-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX90A-NEXT: v_mov_b32_e32 v2, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX90A-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX90A-NEXT: s_waitcnt lgkmcnt(0)
-; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX90A-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX90A-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX90A-NEXT: v_mov_b32_e32 v1, v2
; GFX90A-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX90A-NEXT: s_cbranch_execnz .LBB29_1
; GFX90A-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9599,12 +9641,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX908-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX908-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX908-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX908-NEXT: s_waitcnt lgkmcnt(0)
-; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX908-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX908-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX908-NEXT: v_mov_b32_e32 v1, v2
; GFX908-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX908-NEXT: s_cbranch_execnz .LBB29_1
; GFX908-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9620,12 +9662,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX8-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX8-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, v1
+; GFX8-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX8-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX8-NEXT: v_mov_b32_e32 v1, v2
; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX8-NEXT: s_cbranch_execnz .LBB29_1
; GFX8-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9641,12 +9683,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX7-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX7-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX7-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX7-NEXT: v_mov_b32_e32 v2, v1
+; GFX7-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX7-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX7-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX7-NEXT: v_mov_b32_e32 v1, v2
; GFX7-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7-NEXT: s_cbranch_execnz .LBB29_1
; GFX7-NEXT: ; %bb.2: ; %atomicrmw.end
@@ -9662,12 +9704,12 @@ define void @local_atomic_fsub_noret_f32__amdgpu_ignore_denormal_mode(ptr addrsp
; GFX6-NEXT: .LBB29_1: ; %atomicrmw.start
; GFX6-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_add_f32_e32 v2, -4.0, v1
-; GFX6-NEXT: ds_cmpst_rtn_b32 v2, v0, v1, v2
+; GFX6-NEXT: v_mov_b32_e32 v2, v1
+; GFX6-NEXT: v_add_f32_e32 v1, -4.0, v2
+; GFX6-NEXT: ds_cmpst_rtn_b32 v1, v0, v2, v1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, v1, v2
; GFX6-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v1, v2
; GFX6-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX6-NEXT: s_cbranch_execnz .LBB29_1
; GFX6-NEXT: ; %bb.2: ; %atomicrmw.end
diff --git a/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll b/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
index 68506ce..9056d40 100644
--- a/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/lshr.v2i16.ll
@@ -36,20 +36,19 @@ define amdgpu_kernel void @s_lshr_v2i16(ptr addrspace(1) %out, <2 x i16> %lhs, <
; CI-LABEL: s_lshr_v2i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; CI-NEXT: s_mov_b32 s7, 0xf000
-; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_mov_b32 s4, s0
-; CI-NEXT: s_mov_b32 s5, s1
-; CI-NEXT: s_and_b32 s0, s2, 0xffff
-; CI-NEXT: s_lshr_b32 s1, s2, 16
-; CI-NEXT: s_lshr_b32 s2, s3, 16
-; CI-NEXT: s_lshr_b32 s1, s1, s2
-; CI-NEXT: s_lshl_b32 s1, s1, 16
-; CI-NEXT: s_lshr_b32 s0, s0, s3
-; CI-NEXT: s_or_b32 s0, s0, s1
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: s_and_b32 s6, s4, 0xffff
+; CI-NEXT: s_lshr_b32 s4, s4, 16
+; CI-NEXT: s_lshr_b32 s7, s5, 16
+; CI-NEXT: s_lshr_b32 s4, s4, s7
+; CI-NEXT: s_lshl_b32 s4, s4, 16
+; CI-NEXT: s_lshr_b32 s5, s6, s5
+; CI-NEXT: s_or_b32 s4, s5, s4
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; GFX10-LABEL: s_lshr_v2i16:
diff --git a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
index 680942fcb..9ecd35e 100644
--- a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
+++ b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
@@ -133,7 +133,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: ; %bb.3:
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s4
; CHECK-NEXT: v_add_nc_u32_e32 v45, -1, v42
-; CHECK-NEXT: s_mov_b32 s53, 0
+; CHECK-NEXT: s_mov_b32 s55, 0
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v45
; CHECK-NEXT: s_and_b32 exec_lo, exec_lo, vcc_lo
; CHECK-NEXT: s_cbranch_execz .LBB0_25
@@ -141,7 +141,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
; CHECK-NEXT: v_lshlrev_b32_e32 v43, 10, v43
; CHECK-NEXT: v_add_nc_u32_e32 v46, 0x3c05, v0
; CHECK-NEXT: v_mov_b32_e32 v47, 0
-; CHECK-NEXT: s_mov_b32 s55, 0
+; CHECK-NEXT: s_mov_b32 s53, 0
; CHECK-NEXT: .LBB0_5: ; =>This Loop Header: Depth=1
; CHECK-NEXT: ; Child Loop BB0_8 Depth 2
; CHECK-NEXT: ; Child Loop BB0_20 Depth 2
@@ -866,8 +866,8 @@ define protected amdgpu_kernel void @kernel_round1_short(ptr addrspace(1) nocapt
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_mov_b32_e32 v41, v0
; CHECK-NEXT: v_lshlrev_b32_e32 v42, 10, v42
-; CHECK-NEXT: s_mov_b32 s52, 0
; CHECK-NEXT: s_mov_b32 s4, 0
+; CHECK-NEXT: s_mov_b32 s52, 0
; CHECK-NEXT: ds_write_b8 v46, v43 offset:15364
; CHECK-NEXT: v_add_nc_u32_e32 v45, -1, v41
; CHECK-NEXT: .LBB1_1: ; %.37
diff --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
index 46b8df4..9cc0e62 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -133,35 +133,33 @@ define amdgpu_kernel void @i16_mad24(ptr addrspace(1) %out, i16 %a, i16 %b, i16
; GCN-LABEL: i16_mad24:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_load_dword s4, s[4:5], 0xb
-; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_load_dword s6, s[4:5], 0xb
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_lshr_b32 s2, s2, 16
-; GCN-NEXT: s_mul_i32 s2, s4, s2
-; GCN-NEXT: s_add_i32 s2, s2, s3
-; GCN-NEXT: s_sext_i32_i16 s2, s2
-; GCN-NEXT: s_mov_b32 s6, -1
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s2
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_lshr_b32 s2, s4, 16
+; GCN-NEXT: s_mul_i32 s2, s6, s2
+; GCN-NEXT: s_add_i32 s2, s2, s5
+; GCN-NEXT: s_sext_i32_i16 s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
;
; GFX8-LABEL: i16_mad24:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_load_dword s8, s[4:5], 0x2c
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: s_load_dword s6, s[4:5], 0x2c
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_lshr_b32 s0, s2, 16
-; GFX8-NEXT: s_mul_i32 s0, s8, s0
-; GFX8-NEXT: s_add_i32 s0, s0, s3
-; GFX8-NEXT: s_sext_i32_i16 s0, s0
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_lshr_b32 s4, s4, 16
+; GFX8-NEXT: s_mul_i32 s4, s6, s4
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
entry:
%0 = mul i16 %a, %b
diff --git a/llvm/test/CodeGen/AMDGPU/max.ll b/llvm/test/CodeGen/AMDGPU/max.ll
index ba53294..c48e25f3 100644
--- a/llvm/test/CodeGen/AMDGPU/max.ll
+++ b/llvm/test/CodeGen/AMDGPU/max.ll
@@ -155,14 +155,13 @@ define amdgpu_kernel void @s_test_imax_sge_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-LABEL: s_test_imax_sge_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_max_i32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_i32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_imax_sge_i32:
@@ -357,16 +356,15 @@ define amdgpu_kernel void @s_test_imax_sgt_imm_v2i32(ptr addrspace(1) %out, <2 x
; SI-LABEL: s_test_imax_sgt_imm_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_max_i32 s0, s3, 9
-; SI-NEXT: s_max_i32 s1, s2, 9
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_i32 s5, s5, 9
+; SI-NEXT: s_max_i32 s4, s4, 9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_imax_sgt_imm_v2i32:
@@ -472,14 +470,13 @@ define amdgpu_kernel void @s_test_imax_sgt_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-LABEL: s_test_imax_sgt_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_max_i32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_i32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_imax_sgt_i32:
@@ -582,14 +579,13 @@ define amdgpu_kernel void @s_test_umax_uge_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-LABEL: s_test_umax_uge_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_max_u32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_u32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_umax_uge_i32:
@@ -817,14 +813,13 @@ define amdgpu_kernel void @s_test_umax_ugt_i32(ptr addrspace(1) %out, i32 %a, i3
; SI-LABEL: s_test_umax_ugt_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_max_u32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_u32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_umax_ugt_i32:
@@ -858,16 +853,15 @@ define amdgpu_kernel void @s_test_umax_ugt_imm_v2i32(ptr addrspace(1) %out, <2 x
; SI-LABEL: s_test_umax_ugt_imm_v2i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_max_u32 s0, s3, 23
-; SI-NEXT: s_max_u32 s1, s2, 15
-; SI-NEXT: v_mov_b32_e32 v0, s1
-; SI-NEXT: v_mov_b32_e32 v1, s0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_max_u32 s5, s5, 23
+; SI-NEXT: s_max_u32 s4, s4, 15
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX1250-LABEL: s_test_umax_ugt_imm_v2i32:
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
index ca4f5d2..43752c2 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -90,18 +90,18 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: .LBB0_13: ; %loop-memcpy-expansion2
; CHECK-NEXT: ; Parent Loop BB0_11 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
-; CHECK-NEXT: v_mov_b32_e32 v6, s12
-; CHECK-NEXT: v_mov_b32_e32 v7, s13
+; CHECK-NEXT: v_mov_b32_e32 v6, s10
+; CHECK-NEXT: v_mov_b32_e32 v7, s11
; CHECK-NEXT: flat_load_dwordx4 v[10:13], v[6:7]
-; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s12, v8
-; CHECK-NEXT: s_add_u32 s12, s12, 16
+; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, s10, v8
+; CHECK-NEXT: s_add_u32 s10, s10, 16
; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v9, v7, vcc
-; CHECK-NEXT: s_addc_u32 s13, s13, 0
-; CHECK-NEXT: v_cmp_ge_u64_e32 vcc, s[12:13], v[0:1]
-; CHECK-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CHECK-NEXT: s_addc_u32 s11, s11, 0
+; CHECK-NEXT: v_cmp_ge_u64_e32 vcc, s[10:11], v[0:1]
+; CHECK-NEXT: s_or_b64 s[12:13], vcc, s[12:13]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_dwordx4 v[6:7], v[10:13]
-; CHECK-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CHECK-NEXT: s_andn2_b64 exec, exec, s[12:13]
; CHECK-NEXT: s_cbranch_execnz .LBB0_13
; CHECK-NEXT: .LBB0_14: ; %Flow15
; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
@@ -115,8 +115,8 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: s_cbranch_execz .LBB0_9
; CHECK-NEXT: ; %bb.16: ; %loop-memcpy-residual4.preheader
; CHECK-NEXT: ; in Loop: Header=BB0_11 Depth=1
-; CHECK-NEXT: s_mov_b64 s[12:13], 0
; CHECK-NEXT: s_mov_b64 s[14:15], 0
+; CHECK-NEXT: s_mov_b64 s[12:13], 0
; CHECK-NEXT: .LBB0_17: ; %loop-memcpy-residual4
; CHECK-NEXT: ; Parent Loop BB0_11 Depth=1
; CHECK-NEXT: ; => This Inner Loop Header: Depth=2
diff --git a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
index 14b0729..953511d 100644
--- a/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/memmove-var-size.ll
@@ -10,13 +10,13 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-LABEL: memmove_p0_p0:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB0_3
@@ -33,10 +33,10 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB0_5: ; %memmove_fwd_main_loop
@@ -59,20 +59,20 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB0_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB0_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: flat_load_ubyte v4, v[2:3]
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v4
@@ -82,10 +82,10 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_cbranch_execnz .LBB0_8
; CHECK-NEXT: .LBB0_9: ; %Flow28
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB0_2
@@ -104,11 +104,11 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: .LBB0_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: flat_load_ubyte v12, v[10:11]
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: flat_store_byte v[4:5], v12
@@ -129,19 +129,19 @@ define void @memmove_p0_p0(ptr addrspace(0) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB0_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[4:5]
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
+; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB0_15
; CHECK-NEXT: .LBB0_16: ; %Flow32
@@ -158,13 +158,13 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-LABEL: memmove_p0_p1:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB1_3
@@ -181,10 +181,10 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB1_5: ; %memmove_fwd_main_loop
@@ -207,20 +207,20 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB1_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB1_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v4
@@ -230,10 +230,10 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: s_cbranch_execnz .LBB1_8
; CHECK-NEXT: .LBB1_9: ; %Flow30
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB1_2
@@ -252,11 +252,11 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: .LBB1_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v12, v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v4, s4, v4, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v5, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_byte v[10:11], v12
@@ -277,19 +277,19 @@ define void @memmove_p0_p1(ptr addrspace(0) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB1_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
+; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB1_15
; CHECK-NEXT: .LBB1_16: ; %Flow34
@@ -423,17 +423,17 @@ define void @memmove_p0_p3(ptr addrspace(0) align 1 %dst, ptr addrspace(3) align
; CHECK-NEXT: .LBB2_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ds_read_b128 v[7:10], v2
-; CHECK-NEXT: v_add_co_u32 v3, vcc_lo, v5, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v6, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v11, vcc_lo, v0, v5
-; CHECK-NEXT: v_add_co_ci_u32_e64 v12, null, v1, v6, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[3:4]
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v3, v5
+; CHECK-NEXT: v_mov_b32_e32 v4, v6
; CHECK-NEXT: v_add_nc_u32_e32 v2, -16, v2
-; CHECK-NEXT: v_mov_b32_e32 v5, v3
-; CHECK-NEXT: s_or_b32 s7, s4, s7
+; CHECK-NEXT: v_add_co_u32 v5, vcc_lo, v3, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v6, null, -1, v4, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v3, s4, v0, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, v1, v4, s4
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[5:6]
+; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[11:12], v[7:10]
+; CHECK-NEXT: flat_store_dwordx4 v[3:4], v[7:10]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB2_15
; CHECK-NEXT: .LBB2_16: ; %Flow36
@@ -450,13 +450,13 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-LABEL: memmove_p0_p4:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB3_3
@@ -473,10 +473,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB3_5: ; %memmove_fwd_main_loop
@@ -499,20 +499,20 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB3_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB3_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_byte v[0:1], v4
@@ -522,10 +522,10 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_cbranch_execnz .LBB3_8
; CHECK-NEXT: .LBB3_9: ; %Flow29
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB3_2
@@ -544,11 +544,11 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .LBB3_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v12, v[10:11], off
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: flat_store_byte v[4:5], v12
@@ -569,19 +569,19 @@ define void @memmove_p0_p4(ptr addrspace(0) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB3_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
+; CHECK-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB3_15
; CHECK-NEXT: .LBB3_16: ; %Flow33
@@ -723,17 +723,17 @@ define void @memmove_p0_p5(ptr addrspace(0) align 1 %dst, ptr addrspace(5) align
; CHECK-NEXT: buffer_load_dword v8, v2, s[0:3], 0 offen offset:4
; CHECK-NEXT: buffer_load_dword v9, v2, s[0:3], 0 offen offset:8
; CHECK-NEXT: buffer_load_dword v10, v2, s[0:3], 0 offen offset:12
-; CHECK-NEXT: v_add_co_u32 v3, vcc_lo, v5, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, -1, v6, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v11, vcc_lo, v0, v5
-; CHECK-NEXT: v_add_co_ci_u32_e64 v12, null, v1, v6, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[3:4]
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v3, v5
+; CHECK-NEXT: v_mov_b32_e32 v4, v6
; CHECK-NEXT: v_add_nc_u32_e32 v2, -16, v2
-; CHECK-NEXT: v_mov_b32_e32 v5, v3
-; CHECK-NEXT: s_or_b32 s7, s4, s7
+; CHECK-NEXT: v_add_co_u32 v5, vcc_lo, v3, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v6, null, -1, v4, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v3, s4, v0, v3
+; CHECK-NEXT: v_add_co_ci_u32_e64 v4, null, v1, v4, s4
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[5:6]
+; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_store_dwordx4 v[11:12], v[7:10]
+; CHECK-NEXT: flat_store_dwordx4 v[3:4], v[7:10]
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB4_15
; CHECK-NEXT: .LBB4_16: ; %Flow36
@@ -751,13 +751,13 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-LABEL: memmove_p1_p0:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB5_3
@@ -773,10 +773,10 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB5_5: ; %memmove_fwd_main_loop
@@ -799,20 +799,20 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB5_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB5_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: flat_load_ubyte v4, v[2:3]
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: global_store_byte v[0:1], v4, off
@@ -822,10 +822,10 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: s_cbranch_execnz .LBB5_8
; CHECK-NEXT: .LBB5_9: ; %Flow30
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB5_2
@@ -844,11 +844,11 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: .LBB5_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: flat_load_ubyte v12, v[10:11]
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: global_store_byte v[4:5], v12, off
@@ -869,19 +869,19 @@ define void @memmove_p1_p0(ptr addrspace(1) align 1 %dst, ptr addrspace(0) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB5_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: flat_load_dwordx4 v[8:11], v[4:5]
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
+; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB5_15
; CHECK-NEXT: .LBB5_16: ; %Flow34
@@ -897,13 +897,13 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-LABEL: memmove_p1_p1:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB6_3
@@ -919,10 +919,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_5: ; %memmove_fwd_main_loop
@@ -945,20 +945,20 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB6_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_byte v[0:1], v4, off
@@ -968,10 +968,10 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: s_cbranch_execnz .LBB6_8
; CHECK-NEXT: .LBB6_9: ; %Flow32
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB6_2
@@ -990,11 +990,11 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: .LBB6_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v12, v[10:11], off
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_byte v[4:5], v12, off
@@ -1015,19 +1015,19 @@ define void @memmove_p1_p1(ptr addrspace(1) align 1 %dst, ptr addrspace(1) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB6_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
+; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB6_15
; CHECK-NEXT: .LBB6_16: ; %Flow36
@@ -1109,13 +1109,13 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-LABEL: memmove_p1_p4:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_and_b32_e32 v8, 15, v4
-; CHECK-NEXT: v_mov_b32_e32 v9, 0
-; CHECK-NEXT: v_and_b32_e32 v6, -16, v4
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
+; CHECK-NEXT: v_and_b32_e32 v6, 15, v4
+; CHECK-NEXT: v_mov_b32_e32 v7, 0
+; CHECK-NEXT: v_and_b32_e32 v8, -16, v4
+; CHECK-NEXT: v_mov_b32_e32 v9, v5
; CHECK-NEXT: s_mov_b32 s6, exec_lo
-; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[8:9]
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e64 s4, 0, v[6:7]
+; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[8:9]
; CHECK-NEXT: v_cmpx_ge_u64_e64 v[2:3], v[0:1]
; CHECK-NEXT: s_xor_b32 s7, exec_lo, s6
; CHECK-NEXT: s_cbranch_execnz .LBB8_3
@@ -1131,10 +1131,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: ; %bb.4: ; %memmove_fwd_main_loop.preheader
; CHECK-NEXT: v_mov_b32_e32 v5, v3
; CHECK-NEXT: v_mov_b32_e32 v11, v1
-; CHECK-NEXT: v_mov_b32_e32 v13, v7
+; CHECK-NEXT: v_mov_b32_e32 v13, v9
; CHECK-NEXT: v_mov_b32_e32 v4, v2
; CHECK-NEXT: v_mov_b32_e32 v10, v0
-; CHECK-NEXT: v_mov_b32_e32 v12, v6
+; CHECK-NEXT: v_mov_b32_e32 v12, v8
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_5: ; %memmove_fwd_main_loop
@@ -1157,20 +1157,20 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_and_saveexec_b32 s8, s4
; CHECK-NEXT: s_cbranch_execz .LBB8_9
; CHECK-NEXT: ; %bb.7: ; %memmove_fwd_residual_loop.preheader
-; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v7, s5
-; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v7, s5
+; CHECK-NEXT: v_add_co_u32 v0, s5, v0, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, v1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v2, s5, v2, v8
+; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, v3, v9, s5
; CHECK-NEXT: s_mov_b32 s9, 0
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_8: ; %memmove_fwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v4, v[2:3], off
-; CHECK-NEXT: v_add_co_u32 v8, s5, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s5
+; CHECK-NEXT: v_add_co_u32 v6, s5, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s5
; CHECK-NEXT: v_add_co_u32 v2, s5, v2, 1
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, s5
-; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s5, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s9, s5, s9
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_byte v[0:1], v4, off
@@ -1180,10 +1180,10 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: s_cbranch_execnz .LBB8_8
; CHECK-NEXT: .LBB8_9: ; %Flow31
; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s8
-; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
+; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: ; implicit-def: $vgpr8_vgpr9
+; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
; CHECK-NEXT: ; implicit-def: $vgpr4_vgpr5
; CHECK-NEXT: s_andn2_saveexec_b32 s6, s7
; CHECK-NEXT: s_cbranch_execz .LBB8_2
@@ -1202,11 +1202,11 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .LBB8_12: ; %memmove_bwd_residual_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: global_load_ubyte v12, v[10:11], off
-; CHECK-NEXT: v_add_co_u32 v8, s4, v8, -1
-; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v9, s4
+; CHECK-NEXT: v_add_co_u32 v6, s4, v6, -1
+; CHECK-NEXT: v_add_co_ci_u32_e64 v7, null, -1, v7, s4
; CHECK-NEXT: v_add_co_u32 v10, s4, v10, -1
; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, -1, v11, s4
-; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[8:9]
+; CHECK-NEXT: v_cmp_eq_u64_e64 s4, 0, v[6:7]
; CHECK-NEXT: s_or_b32 s8, s4, s8
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: global_store_byte v[4:5], v12, off
@@ -1227,19 +1227,19 @@ define void @memmove_p1_p4(ptr addrspace(1) align 1 %dst, ptr addrspace(4) align
; CHECK-NEXT: .p2align 6
; CHECK-NEXT: .LBB8_15: ; %memmove_bwd_main_loop
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v7, vcc_lo
-; CHECK-NEXT: v_add_co_u32 v12, s4, v0, v6
-; CHECK-NEXT: v_add_co_ci_u32_e64 v13, null, v1, v7, s4
-; CHECK-NEXT: global_load_dwordx4 v[8:11], v[4:5], off
-; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v6, -16
-; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, -1, v7, vcc_lo
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
-; CHECK-NEXT: v_mov_b32_e32 v7, v5
-; CHECK-NEXT: v_mov_b32_e32 v6, v4
+; CHECK-NEXT: v_mov_b32_e32 v11, v9
+; CHECK-NEXT: v_mov_b32_e32 v10, v8
+; CHECK-NEXT: v_add_co_u32 v4, vcc_lo, v2, v10
+; CHECK-NEXT: v_add_co_ci_u32_e64 v5, null, v3, v11, vcc_lo
+; CHECK-NEXT: v_add_co_u32 v8, vcc_lo, v10, -16
+; CHECK-NEXT: v_add_co_ci_u32_e64 v9, null, -1, v11, vcc_lo
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v[4:5], off
+; CHECK-NEXT: v_add_co_u32 v10, s4, v0, v10
+; CHECK-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[8:9]
+; CHECK-NEXT: v_add_co_ci_u32_e64 v11, null, v1, v11, s4
; CHECK-NEXT: s_or_b32 s7, vcc_lo, s7
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: global_store_dwordx4 v[12:13], v[8:11], off
+; CHECK-NEXT: global_store_dwordx4 v[10:11], v[4:7], off
; CHECK-NEXT: s_andn2_b32 exec_lo, exec_lo, s7
; CHECK-NEXT: s_cbranch_execnz .LBB8_15
; CHECK-NEXT: .LBB8_16: ; %Flow35
diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
index 3d9c2a2..10d4eb0 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
@@ -10,46 +10,43 @@ define amdgpu_kernel void @test_smul24_i32(ptr addrspace(1) %out, i32 %a, i32 %b
; SI-LABEL: test_smul24_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_bfe_i32 s2, s2, 0x180000
-; SI-NEXT: s_bfe_i32 s3, s3, 0x180000
-; SI-NEXT: s_mul_i32 s2, s2, s3
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_bfe_i32 s2, s4, 0x180000
+; SI-NEXT: s_bfe_i32 s4, s5, 0x180000
+; SI-NEXT: s_mul_i32 s4, s2, s4
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_smul24_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_bfe_i32 s0, s2, 0x180000
-; VI-NEXT: s_bfe_i32 s1, s3, 0x180000
-; VI-NEXT: s_mul_i32 s0, s0, s1
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_bfe_i32 s4, s4, 0x180000
+; VI-NEXT: s_bfe_i32 s5, s5, 0x180000
+; VI-NEXT: s_mul_i32 s4, s4, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_smul24_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mov_b32 s4, s0
-; GFX9-NEXT: s_mov_b32 s5, s1
-; GFX9-NEXT: s_bfe_i32 s0, s2, 0x180000
-; GFX9-NEXT: s_bfe_i32 s1, s3, 0x180000
-; GFX9-NEXT: s_mul_i32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX9-NEXT: s_bfe_i32 s4, s4, 0x180000
+; GFX9-NEXT: s_bfe_i32 s5, s5, 0x180000
+; GFX9-NEXT: s_mul_i32 s4, s4, s5
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
;
; EG-LABEL: test_smul24_i32:
@@ -127,16 +124,15 @@ define amdgpu_kernel void @test_smulhi24_i64(ptr addrspace(1) %out, i32 %a, i32
; GFX9-LABEL: test_smulhi24_i64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mov_b32 s4, s0
-; GFX9-NEXT: s_mov_b32 s5, s1
-; GFX9-NEXT: s_bfe_i32 s0, s2, 0x180000
-; GFX9-NEXT: s_bfe_i32 s1, s3, 0x180000
-; GFX9-NEXT: s_mul_hi_i32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX9-NEXT: s_bfe_i32 s4, s4, 0x180000
+; GFX9-NEXT: s_bfe_i32 s5, s5, 0x180000
+; GFX9-NEXT: s_mul_hi_i32 s4, s4, s5
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
;
; EG-LABEL: test_smulhi24_i64:
@@ -464,29 +460,26 @@ define amdgpu_kernel void @test_smul24_i33(ptr addrspace(1) %out, i33 %a, i33 %b
; SI-LABEL: test_smul24_i33:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_bfe_i32 s0, s8, 0x180000
-; SI-NEXT: s_bfe_i32 s1, s2, 0x180000
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: s_mul_i32 s0, s1, s0
-; SI-NEXT: v_mul_hi_i32_i24_e32 v1, s1, v0
-; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_bfe_i32 s4, s4, 0x180000
+; SI-NEXT: s_bfe_i32 s5, s6, 0x180000
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: s_mul_i32 s4, s5, s4
+; SI-NEXT: v_mul_hi_i32_i24_e32 v1, s5, v0
+; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 31
; SI-NEXT: v_ashr_i64 v[0:1], v[0:1], 31
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_smul24_i33:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_bfe_i32 s2, s2, 0x180000
; VI-NEXT: s_bfe_i32 s3, s4, 0x180000
@@ -494,10 +487,10 @@ define amdgpu_kernel void @test_smul24_i33(ptr addrspace(1) %out, i33 %a, i33 %b
; VI-NEXT: v_mul_hi_i32_i24_e32 v1, s2, v0
; VI-NEXT: v_mul_i32_i24_e32 v0, s2, v0
; VI-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
-; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: v_ashrrev_i64 v[0:1], 31, v[0:1]
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_smul24_i33:
@@ -577,31 +570,29 @@ define amdgpu_kernel void @test_smulhi24_i33(ptr addrspace(1) %out, i33 %a, i33
; SI-LABEL: test_smulhi24_i33:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: v_mul_hi_i32_i24_e32 v0, s2, v0
+; SI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mul_hi_i32_i24_e32 v0, s6, v0
; SI-NEXT: v_and_b32_e32 v0, 1, v0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_smulhi24_i33:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x34
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: v_mul_hi_i32_i24_e32 v0, s2, v0
-; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_mov_b64 s[6:7], s[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: v_mul_hi_i32_i24_e32 v0, s6, v0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_and_b32_e32 v0, 1, v0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_smulhi24_i33:
diff --git a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
index e29da3a..1165401 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_uint24-amdgcn.ll
@@ -10,46 +10,43 @@ define amdgpu_kernel void @test_umul24_i32(ptr addrspace(1) %out, i32 %a, i32 %b
; SI-LABEL: test_umul24_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_and_b32 s2, s2, 0xffffff
-; SI-NEXT: s_and_b32 s3, s3, 0xffffff
-; SI-NEXT: s_mul_i32 s2, s2, s3
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_and_b32 s2, s4, 0xffffff
+; SI-NEXT: s_and_b32 s4, s5, 0xffffff
+; SI-NEXT: s_mul_i32 s4, s2, s4
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_umul24_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_and_b32 s0, s2, 0xffffff
-; VI-NEXT: s_and_b32 s1, s3, 0xffffff
-; VI-NEXT: s_mul_i32 s0, s0, s1
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_and_b32 s4, s4, 0xffffff
+; VI-NEXT: s_and_b32 s5, s5, 0xffffff
+; VI-NEXT: s_mul_i32 s4, s4, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_umul24_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mov_b32 s4, s0
-; GFX9-NEXT: s_mov_b32 s5, s1
-; GFX9-NEXT: s_and_b32 s0, s2, 0xffffff
-; GFX9-NEXT: s_and_b32 s1, s3, 0xffffff
-; GFX9-NEXT: s_mul_i32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX9-NEXT: s_and_b32 s4, s4, 0xffffff
+; GFX9-NEXT: s_and_b32 s5, s5, 0xffffff
+; GFX9-NEXT: s_mul_i32 s4, s4, s5
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
entry:
%0 = shl i32 %a, 8
@@ -406,16 +403,15 @@ define amdgpu_kernel void @test_umulhi24_i32_i64(ptr addrspace(1) %out, i32 %a,
; GFX9-LABEL: test_umulhi24_i32_i64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b32 s7, 0xf000
-; GFX9-NEXT: s_mov_b32 s6, -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_mov_b32 s4, s0
-; GFX9-NEXT: s_mov_b32 s5, s1
-; GFX9-NEXT: s_and_b32 s0, s2, 0xffffff
-; GFX9-NEXT: s_and_b32 s1, s3, 0xffffff
-; GFX9-NEXT: s_mul_hi_u32 s0, s0, s1
-; GFX9-NEXT: v_mov_b32_e32 v0, s0
-; GFX9-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX9-NEXT: s_and_b32 s4, s4, 0xffffff
+; GFX9-NEXT: s_and_b32 s5, s5, 0xffffff
+; GFX9-NEXT: s_mul_hi_u32 s4, s4, s5
+; GFX9-NEXT: s_mov_b32 s3, 0xf000
+; GFX9-NEXT: s_mov_b32 s2, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT: s_endpgm
entry:
%a.24 = and i32 %a, 16777215
@@ -632,33 +628,31 @@ define amdgpu_kernel void @test_umulhi16_i32(ptr addrspace(1) %out, i32 %a, i32
; SI-LABEL: test_umulhi16_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_and_b32 s2, s2, 0xffff
-; SI-NEXT: s_and_b32 s3, s3, 0xffff
-; SI-NEXT: s_mul_i32 s2, s2, s3
-; SI-NEXT: s_lshr_b32 s2, s2, 16
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s2
-; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_and_b32 s2, s4, 0xffff
+; SI-NEXT: s_and_b32 s4, s5, 0xffff
+; SI-NEXT: s_mul_i32 s2, s2, s4
+; SI-NEXT: s_lshr_b32 s4, s2, 16
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_umulhi16_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_and_b32 s0, s2, 0xffff
-; VI-NEXT: s_and_b32 s1, s3, 0xffff
-; VI-NEXT: s_mul_i32 s0, s0, s1
-; VI-NEXT: s_lshr_b32 s0, s0, 16
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: s_and_b32 s5, s5, 0xffff
+; VI-NEXT: s_mul_i32 s4, s4, s5
+; VI-NEXT: s_lshr_b32 s4, s4, 16
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: test_umulhi16_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll b/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll
new file mode 100644
index 0000000..5f303cc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/nofpclass-call.ll
@@ -0,0 +1,191 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+; Check that nofpclass attributes on call returns are used in
+; SelectionDAG.
+
+define internal float @func_f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: func_f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dword v0, v[0:1], off glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %ld = load volatile float, ptr addrspace(1) %ptr
+ ret float %ld
+}
+
+define float @call_nofpclass_funcs_f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: call_nofpclass_funcs_f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s18, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
+; CHECK-NEXT: buffer_store_dword v4, off, s[0:3], s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[16:17]
+; CHECK-NEXT: s_addk_i32 s32, 0x400
+; CHECK-NEXT: v_writelane_b32 v4, s30, 0
+; CHECK-NEXT: s_getpc_b64 s[16:17]
+; CHECK-NEXT: s_add_u32 s16, s16, func_f32@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s17, s17, func_f32@rel32@hi+12
+; CHECK-NEXT: v_writelane_b32 v4, s31, 1
+; CHECK-NEXT: v_mov_b32_e32 v2, v0
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: v_mov_b32_e32 v0, v2
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_min_f32_e32 v0, v3, v0
+; CHECK-NEXT: v_readlane_b32 s31, v4, 1
+; CHECK-NEXT: v_readlane_b32 s30, v4, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; CHECK-NEXT: buffer_load_dword v4, off, s[0:3], s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_mov_b64 exec, s[4:5]
+; CHECK-NEXT: s_mov_b32 s33, s18
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) float @func_f32(ptr addrspace(1) %ptr)
+ %call1 = call nofpclass(nan) float @func_f32(ptr addrspace(1) %ptr)
+ %min = call float @llvm.minnum.f32(float %call0, float %call1)
+ ret float %min
+}
+
+define internal <2 x float> @func_v2f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: func_v2f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx2 v[0:1], v[0:1], off glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %ld = load volatile <2 x float>, ptr addrspace(1) %ptr
+ ret <2 x float> %ld
+}
+
+define <2 x float> @call_nofpclass_funcs_v2f32(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: call_nofpclass_funcs_v2f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s18, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
+; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[16:17]
+; CHECK-NEXT: s_addk_i32 s32, 0x400
+; CHECK-NEXT: v_writelane_b32 v6, s30, 0
+; CHECK-NEXT: s_getpc_b64 s[16:17]
+; CHECK-NEXT: s_add_u32 s16, s16, func_v2f32@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s17, s17, func_v2f32@rel32@hi+12
+; CHECK-NEXT: v_writelane_b32 v6, s31, 1
+; CHECK-NEXT: v_mov_b32_e32 v2, v1
+; CHECK-NEXT: v_mov_b32_e32 v3, v0
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_mov_b32_e32 v4, v0
+; CHECK-NEXT: v_mov_b32_e32 v5, v1
+; CHECK-NEXT: v_mov_b32_e32 v0, v3
+; CHECK-NEXT: v_mov_b32_e32 v1, v2
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_min_f32_e32 v0, v4, v0
+; CHECK-NEXT: v_min_f32_e32 v1, v5, v1
+; CHECK-NEXT: v_readlane_b32 s31, v6, 1
+; CHECK-NEXT: v_readlane_b32 s30, v6, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_mov_b64 exec, s[4:5]
+; CHECK-NEXT: s_mov_b32 s33, s18
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) <2 x float> @func_v2f32(ptr addrspace(1) %ptr)
+ %call1 = call nofpclass(nan) <2 x float> @func_v2f32(ptr addrspace(1) %ptr)
+ %min = call <2 x float> @llvm.minnum.v2f32(<2 x float> %call0, <2 x float> %call1)
+ ret <2 x float> %min
+}
+
+define internal double @func_f64(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: func_f64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: global_load_dwordx2 v[0:1], v[0:1], off glc
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %ld = load volatile double, ptr addrspace(1) %ptr
+ ret double %ld
+}
+
+define double @call_nofpclass_funcs_f64(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: call_nofpclass_funcs_f64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: s_mov_b32 s18, s33
+; CHECK-NEXT: s_mov_b32 s33, s32
+; CHECK-NEXT: s_xor_saveexec_b64 s[16:17], -1
+; CHECK-NEXT: buffer_store_dword v6, off, s[0:3], s33 ; 4-byte Folded Spill
+; CHECK-NEXT: s_mov_b64 exec, s[16:17]
+; CHECK-NEXT: s_addk_i32 s32, 0x400
+; CHECK-NEXT: v_writelane_b32 v6, s30, 0
+; CHECK-NEXT: s_getpc_b64 s[16:17]
+; CHECK-NEXT: s_add_u32 s16, s16, func_f64@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s17, s17, func_f64@rel32@hi+12
+; CHECK-NEXT: v_writelane_b32 v6, s31, 1
+; CHECK-NEXT: v_mov_b32_e32 v4, v1
+; CHECK-NEXT: v_mov_b32_e32 v5, v0
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_mov_b32_e32 v2, v0
+; CHECK-NEXT: v_mov_b32_e32 v3, v1
+; CHECK-NEXT: v_mov_b32_e32 v0, v5
+; CHECK-NEXT: v_mov_b32_e32 v1, v4
+; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; CHECK-NEXT: v_min_f64 v[0:1], v[2:3], v[0:1]
+; CHECK-NEXT: v_readlane_b32 s31, v6, 1
+; CHECK-NEXT: v_readlane_b32 s30, v6, 0
+; CHECK-NEXT: s_mov_b32 s32, s33
+; CHECK-NEXT: s_xor_saveexec_b64 s[4:5], -1
+; CHECK-NEXT: buffer_load_dword v6, off, s[0:3], s33 ; 4-byte Folded Reload
+; CHECK-NEXT: s_mov_b64 exec, s[4:5]
+; CHECK-NEXT: s_mov_b32 s33, s18
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) double @func_f64(ptr addrspace(1) %ptr)
+ %call1 = call nofpclass(nan) double @func_f64(ptr addrspace(1) %ptr)
+ %min = call double @llvm.minnum.f64(double %call0, double %call1)
+ ret double %min
+}
+
+define float @call_nofpclass_intrinsic_f32(float %x, float %y, float %z) {
+; CHECK-LABEL: call_nofpclass_intrinsic_f32:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_sqrt_f32_e32 v0, v0
+; CHECK-NEXT: v_sqrt_f32_e32 v1, v1
+; CHECK-NEXT: v_cmp_lt_f32_e32 vcc, v0, v1
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) float @llvm.amdgcn.sqrt.f32(float %x)
+ %call1 = call nofpclass(nan) float @llvm.amdgcn.sqrt.f32(float %y)
+ %lt = fcmp olt float %call0, %call1
+ %min = select nsz i1 %lt, float %call0, float %call1
+ ret float %min
+}
+
+define <2 x half> @call_nofpclass_intrinsic_v2f16(float %x, float %y, float %z, float %w) {
+; CHECK-LABEL: call_nofpclass_intrinsic_v2f16:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_cvt_pkrtz_f16_f32 v0, v0, v1
+; CHECK-NEXT: v_cvt_pkrtz_f16_f32 v1, v2, v3
+; CHECK-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; CHECK-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; CHECK-NEXT: v_cmp_lt_f16_e32 vcc, v0, v1
+; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT: v_cmp_lt_f16_e32 vcc, v3, v2
+; CHECK-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; CHECK-NEXT: s_mov_b32 s4, 0x5040100
+; CHECK-NEXT: v_perm_b32 v0, v1, v0, s4
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+ %call0 = call nofpclass(nan) <2 x half> @llvm.amdgcn.cvt.pkrtz(float %x, float %y)
+ %call1 = call nofpclass(nan) <2 x half> @llvm.amdgcn.cvt.pkrtz(float %z, float %w)
+ %lt = fcmp olt <2 x half> %call0, %call1
+ %min = select nsz <2 x i1> %lt, <2 x half> %call0, <2 x half> %call1
+ ret <2 x half> %min
+}
diff --git a/llvm/test/CodeGen/AMDGPU/or.ll b/llvm/test/CodeGen/AMDGPU/or.ll
index 728067e..9afaab5 100644
--- a/llvm/test/CodeGen/AMDGPU/or.ll
+++ b/llvm/test/CodeGen/AMDGPU/or.ll
@@ -136,27 +136,25 @@ define amdgpu_kernel void @scalar_or_i32(ptr addrspace(1) %out, i32 %a, i32 %b)
; GFX6-LABEL: scalar_or_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_or_b32 s0, s2, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_or_b32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: scalar_or_i32:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b32 s7, 0xf000
-; GFX8-NEXT: s_mov_b32 s6, -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_mov_b32 s4, s0
-; GFX8-NEXT: s_or_b32 s0, s2, s3
-; GFX8-NEXT: s_mov_b32 s5, s1
-; GFX8-NEXT: v_mov_b32_e32 v0, s0
-; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX8-NEXT: s_or_b32 s4, s4, s5
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT: s_endpgm
%or = or i32 %a, %b
store i32 %or, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir b/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir
index 381cb8c..f098618 100644
--- a/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir
+++ b/llvm/test/CodeGen/AMDGPU/reg-coalescer-subreg-liveness.mir
@@ -16,11 +16,11 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
; CHECK-NEXT: undef [[S_LOAD_DWORD_IMM:%[0-9]+]].sub1:sgpr_128 = S_LOAD_DWORD_IMM [[COPY]], 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
; CHECK-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub0:sgpr_128 = S_MOV_B32 1
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 0
; CHECK-NEXT: undef [[S_MOV_B32_1:%[0-9]+]].sub0:sgpr_256 = S_MOV_B32 0
; CHECK-NEXT: TENSOR_LOAD_TO_LDS_D2 [[S_MOV_B32_]], [[S_MOV_B32_1]], 0, 0, implicit-def dead $tensorcnt, implicit $exec, implicit $tensorcnt
; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub0:sgpr_128 = S_MOV_B32 1
; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub0
- ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]].sub2:sgpr_128 = S_MOV_B32 0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
diff --git a/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll b/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
index 5c90957..bcece19 100644
--- a/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
+++ b/llvm/test/CodeGen/AMDGPU/set-inactive-wwm-overwrite.ll
@@ -16,11 +16,11 @@ define amdgpu_cs void @if_then(ptr addrspace(8) inreg %input, ptr addrspace(8) i
; GCN-NEXT: s_cbranch_execz .LBB0_4
; GCN-NEXT: ; %bb.3: ; %.then
; GCN-NEXT: s_or_saveexec_b32 s1, -1
-; GCN-NEXT: v_cndmask_b32_e64 v1, 0, v3, s1
-; GCN-NEXT: v_mov_b32_e32 v2, 0
-; GCN-NEXT: v_mov_b32_dpp v2, v1 row_shr:1 row_mask:0xf bank_mask:0xf
+; GCN-NEXT: v_mov_b32_e32 v1, 0
+; GCN-NEXT: v_cndmask_b32_e64 v2, 0, v3, s1
+; GCN-NEXT: v_mov_b32_dpp v1, v2 row_shr:1 row_mask:0xf bank_mask:0xf
; GCN-NEXT: s_mov_b32 exec_lo, s1
-; GCN-NEXT: v_mov_b32_e32 v0, v2
+; GCN-NEXT: v_mov_b32_e32 v0, v1
; GCN-NEXT: v_mov_b32_e32 v4, -1
; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: buffer_store_dword v4, v0, s[4:7], 0 offen
diff --git a/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll b/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll
index a0bac53..e589a63 100644
--- a/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll
+++ b/llvm/test/CodeGen/AMDGPU/sext-divergence-driven-isel.ll
@@ -5,15 +5,14 @@ define amdgpu_kernel void @sext_i16_to_i32_uniform(ptr addrspace(1) %out, i16 %a
; GCN-LABEL: sext_i16_to_i32_uniform:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_sext_i32_i16 s0, s2
-; GCN-NEXT: s_add_i32 s0, s3, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_sext_i32_i16 s4, s4
+; GCN-NEXT: s_add_i32 s4, s5, s4
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%sext = sext i16 %a to i32
%res = add i32 %b, %sext
diff --git a/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll b/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
index d8511c8..17db379 100644
--- a/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl.v2i16.ll
@@ -22,63 +22,57 @@ define amdgpu_kernel void @s_shl_v2i16(ptr addrspace(1) %out, <2 x i16> %lhs, <2
; VI-LABEL: s_shl_v2i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_lshr_b32 s0, s3, 16
-; VI-NEXT: s_lshr_b32 s1, s2, 16
-; VI-NEXT: s_lshl_b32 s0, s1, s0
-; VI-NEXT: s_lshl_b32 s1, s2, s3
-; VI-NEXT: s_lshl_b32 s0, s0, 16
-; VI-NEXT: s_and_b32 s1, s1, 0xffff
-; VI-NEXT: s_or_b32 s0, s1, s0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_lshr_b32 s6, s5, 16
+; VI-NEXT: s_lshr_b32 s7, s4, 16
+; VI-NEXT: s_lshl_b32 s4, s4, s5
+; VI-NEXT: s_lshl_b32 s5, s7, s6
+; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; CI-LABEL: s_shl_v2i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; CI-NEXT: s_mov_b32 s7, 0xf000
-; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_mov_b32 s4, s0
-; CI-NEXT: s_mov_b32 s5, s1
-; CI-NEXT: s_lshr_b32 s0, s2, 16
-; CI-NEXT: s_lshr_b32 s1, s3, 16
-; CI-NEXT: s_lshl_b32 s0, s0, s1
-; CI-NEXT: s_lshl_b32 s1, s2, s3
-; CI-NEXT: s_lshl_b32 s0, s0, 16
-; CI-NEXT: s_and_b32 s1, s1, 0xffff
-; CI-NEXT: s_or_b32 s0, s1, s0
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: s_lshr_b32 s6, s4, 16
+; CI-NEXT: s_lshr_b32 s7, s5, 16
+; CI-NEXT: s_lshl_b32 s4, s4, s5
+; CI-NEXT: s_lshl_b32 s5, s6, s7
+; CI-NEXT: s_lshl_b32 s5, s5, 16
+; CI-NEXT: s_and_b32 s4, s4, 0xffff
+; CI-NEXT: s_or_b32 s4, s4, s5
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; CI-NEXT: s_endpgm
;
; GFX10-LABEL: s_shl_v2i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-NEXT: s_mov_b32 s7, 0x31016000
-; GFX10-NEXT: s_mov_b32 s6, -1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_pk_lshlrev_b16 v0, s3, s2
-; GFX10-NEXT: s_mov_b32 s4, s0
-; GFX10-NEXT: s_mov_b32 s5, s1
-; GFX10-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX10-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-NEXT: s_mov_b32 s2, -1
+; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: s_shl_v2i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-NEXT: s_mov_b32 s7, 0x31016000
-; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_pk_lshlrev_b16 v0, s3, s2
-; GFX11-NEXT: s_mov_b32 s4, s0
-; GFX11-NEXT: s_mov_b32 s5, s1
-; GFX11-NEXT: buffer_store_b32 v0, off, s[4:7], 0
+; GFX11-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-NEXT: s_endpgm
%result = shl <2 x i16> %lhs, %rhs
store <2 x i16> %result, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll
index d4ee6fa..7c841783 100644
--- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4f32.v3f32.ll
@@ -3272,9 +3272,8 @@ define void @v_shuffle_v4f32_v3f32__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3287,8 +3286,7 @@ define void @v_shuffle_v4f32_v3f32__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -3416,12 +3414,11 @@ define void @v_shuffle_v4f32_v3f32__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v7, 0
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[4:6]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3434,12 +3431,12 @@ define void @v_shuffle_v4f32_v3f32__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v7, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: ;;#ASMSTART
; GFX942-NEXT: ; def v[4:6]
; GFX942-NEXT: ;;#ASMEND
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
@@ -6083,9 +6080,8 @@ define void @v_shuffle_v4f32_v3f32__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v5, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6102,8 +6098,7 @@ define void @v_shuffle_v4f32_v3f32__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6241,9 +6236,8 @@ define void @v_shuffle_v4f32_v3f32__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6256,8 +6250,7 @@ define void @v_shuffle_v4f32_v3f32__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll
index 1a669ad..f714935 100644
--- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4i32.v3i32.ll
@@ -3272,9 +3272,8 @@ define void @v_shuffle_v4i32_v3i32__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3287,8 +3286,7 @@ define void @v_shuffle_v4i32_v3i32__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -3416,12 +3414,11 @@ define void @v_shuffle_v4i32_v3i32__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v7, 0
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[4:6]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3434,12 +3431,12 @@ define void @v_shuffle_v4i32_v3i32__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v7, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: ;;#ASMSTART
; GFX942-NEXT: ; def v[4:6]
; GFX942-NEXT: ;;#ASMEND
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
@@ -6083,9 +6080,8 @@ define void @v_shuffle_v4i32_v3i32__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v5, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6102,8 +6098,7 @@ define void @v_shuffle_v4i32_v3i32__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6241,9 +6236,8 @@ define void @v_shuffle_v4i32_v3i32__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6256,8 +6250,7 @@ define void @v_shuffle_v4i32_v3i32__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll b/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll
index 8039e12..aa9e23b 100644
--- a/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll
+++ b/llvm/test/CodeGen/AMDGPU/shufflevector.v4p3.v3p3.ll
@@ -3272,9 +3272,8 @@ define void @v_shuffle_v4p3_v3p3__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3287,8 +3286,7 @@ define void @v_shuffle_v4p3_v3p3__1_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -3416,12 +3414,11 @@ define void @v_shuffle_v4p3_v3p3__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v7, 0
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[4:6]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v7, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -3434,12 +3431,12 @@ define void @v_shuffle_v4p3_v3p3__4_2_2_2(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v7, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
; GFX942-NEXT: ;;#ASMSTART
; GFX942-NEXT: ; def v[4:6]
; GFX942-NEXT: ;;#ASMEND
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[4:5], v[0:1] op_sel:[1,0]
; GFX942-NEXT: global_store_dwordx4 v7, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
@@ -6083,9 +6080,8 @@ define void @v_shuffle_v4p3_v3p3__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v5, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v5, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6102,8 +6098,7 @@ define void @v_shuffle_v4p3_v3p3__1_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: s_nop 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[2:3], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v5, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
@@ -6241,9 +6236,8 @@ define void @v_shuffle_v4p3_v3p3__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; def v[0:2]
; GFX90A-NEXT: ;;#ASMEND
-; GFX90A-NEXT: v_mov_b32_e32 v0, v2
; GFX90A-NEXT: v_mov_b32_e32 v4, 0
-; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX90A-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX90A-NEXT: v_mov_b32_e32 v3, v2
; GFX90A-NEXT: global_store_dwordx4 v4, v[0:3], s[16:17]
; GFX90A-NEXT: s_waitcnt vmcnt(0)
@@ -6256,8 +6250,7 @@ define void @v_shuffle_v4p3_v3p3__4_5_5_5(ptr addrspace(1) inreg %ptr) {
; GFX942-NEXT: ; def v[0:2]
; GFX942-NEXT: ;;#ASMEND
; GFX942-NEXT: v_mov_b32_e32 v4, 0
-; GFX942-NEXT: v_mov_b32_e32 v0, v2
-; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[0:1] op_sel:[1,0]
+; GFX942-NEXT: v_pk_mov_b32 v[0:1], v[0:1], v[2:3] op_sel:[1,0]
; GFX942-NEXT: v_mov_b32_e32 v3, v2
; GFX942-NEXT: global_store_dwordx4 v4, v[0:3], s[0:1]
; GFX942-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/sign_extend.ll b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
index cb8bbde..ece46b5 100644
--- a/llvm/test/CodeGen/AMDGPU/sign_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
@@ -6,29 +6,27 @@ define amdgpu_kernel void @s_sext_i1_to_i32(ptr addrspace(1) %out, i32 %a, i32 %
; SI-LABEL: s_sext_i1_to_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_cmp_eq_u32 s2, s3
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_cmp_eq_u32 s4, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_sext_i1_to_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_cmp_eq_u32 s2, s3
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_cmp_eq_u32 s4, s5
+; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i32
@@ -78,31 +76,29 @@ define amdgpu_kernel void @s_sext_i1_to_i64(ptr addrspace(1) %out, i32 %a, i32 %
; SI-LABEL: s_sext_i1_to_i64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_cmp_eq_u32 s2, s3
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_cmp_eq_u32 s4, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v1, v0
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_sext_i1_to_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_cmp_eq_u32 s2, s3
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_cmp_eq_u32 s4, s5
+; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v1, v0
-; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i64
@@ -218,29 +214,27 @@ define amdgpu_kernel void @s_sext_i1_to_i16(ptr addrspace(1) %out, i32 %a, i32 %
; SI-LABEL: s_sext_i1_to_i16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_cmp_eq_u32 s2, s3
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_cmp_eq_u32 s4, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_sext_i1_to_i16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_cmp_eq_u32 s2, s3
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_cmp_eq_u32 s4, s5
+; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
+; VI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%cmp = icmp eq i32 %a, %b
%sext = sext i1 %cmp to i16
diff --git a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
index 5461532..e836366 100644
--- a/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
+++ b/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
@@ -1797,8 +1797,8 @@ define amdgpu_ps void @complex_loop(i32 inreg %cmpa, i32 %cmpb, i32 %cmpc) {
; GFX10-WAVE32-NEXT: s_cbranch_scc1 .LBB15_7
; GFX10-WAVE32-NEXT: ; %bb.1: ; %.lr.ph
; GFX10-WAVE32-NEXT: s_mov_b32 s1, exec_lo
-; GFX10-WAVE32-NEXT: s_mov_b32 s0, 0
; GFX10-WAVE32-NEXT: s_mov_b32 s2, 0
+; GFX10-WAVE32-NEXT: s_mov_b32 s0, 0
; GFX10-WAVE32-NEXT: s_branch .LBB15_3
; GFX10-WAVE32-NEXT: .LBB15_2: ; %latch
; GFX10-WAVE32-NEXT: ; in Loop: Header=BB15_3 Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
index 4799876..76f8f48 100644
--- a/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/sminmax.v2i16.ll
@@ -369,42 +369,41 @@ define amdgpu_kernel void @s_abs_v4i16(ptr addrspace(1) %out, <4 x i16> %val) #0
; CI-LABEL: s_abs_v4i16:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; CI-NEXT: s_mov_b32 s7, 0xf000
-; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_mov_b32 s4, s0
-; CI-NEXT: s_mov_b32 s5, s1
-; CI-NEXT: s_ashr_i32 s0, s3, 16
-; CI-NEXT: s_ashr_i32 s1, s2, 16
-; CI-NEXT: s_lshr_b32 s8, s2, 16
-; CI-NEXT: s_lshr_b32 s9, s3, 16
-; CI-NEXT: s_sext_i32_i16 s10, s3
-; CI-NEXT: s_sext_i32_i16 s11, s2
-; CI-NEXT: s_sub_i32 s3, 0, s3
-; CI-NEXT: s_sub_i32 s2, 0, s2
-; CI-NEXT: s_sext_i32_i16 s3, s3
-; CI-NEXT: s_sext_i32_i16 s2, s2
+; CI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; CI-NEXT: s_ashr_i32 s6, s5, 16
+; CI-NEXT: s_lshr_b32 s9, s5, 16
+; CI-NEXT: s_sext_i32_i16 s10, s5
+; CI-NEXT: s_sub_i32 s5, 0, s5
+; CI-NEXT: s_ashr_i32 s7, s4, 16
+; CI-NEXT: s_lshr_b32 s8, s4, 16
+; CI-NEXT: s_sext_i32_i16 s11, s4
+; CI-NEXT: s_sext_i32_i16 s5, s5
+; CI-NEXT: s_sub_i32 s4, 0, s4
; CI-NEXT: s_sub_i32 s9, 0, s9
-; CI-NEXT: s_sub_i32 s8, 0, s8
+; CI-NEXT: s_sext_i32_i16 s4, s4
; CI-NEXT: s_sext_i32_i16 s9, s9
+; CI-NEXT: s_sub_i32 s8, 0, s8
+; CI-NEXT: s_max_i32 s5, s10, s5
; CI-NEXT: s_sext_i32_i16 s8, s8
-; CI-NEXT: s_max_i32 s2, s11, s2
-; CI-NEXT: s_max_i32 s3, s10, s3
-; CI-NEXT: s_max_i32 s1, s1, s8
-; CI-NEXT: s_max_i32 s0, s0, s9
-; CI-NEXT: s_add_i32 s3, s3, 2
-; CI-NEXT: s_add_i32 s2, s2, 2
-; CI-NEXT: s_lshl_b32 s0, s0, 16
-; CI-NEXT: s_and_b32 s3, s3, 0xffff
-; CI-NEXT: s_lshl_b32 s1, s1, 16
-; CI-NEXT: s_and_b32 s2, s2, 0xffff
-; CI-NEXT: s_or_b32 s0, s0, s3
-; CI-NEXT: s_or_b32 s1, s1, s2
-; CI-NEXT: s_add_i32 s0, s0, 0x20000
-; CI-NEXT: s_add_i32 s1, s1, 0x20000
-; CI-NEXT: v_mov_b32_e32 v0, s1
-; CI-NEXT: v_mov_b32_e32 v1, s0
-; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; CI-NEXT: s_max_i32 s6, s6, s9
+; CI-NEXT: s_max_i32 s4, s11, s4
+; CI-NEXT: s_add_i32 s5, s5, 2
+; CI-NEXT: s_max_i32 s7, s7, s8
+; CI-NEXT: s_lshl_b32 s6, s6, 16
+; CI-NEXT: s_and_b32 s5, s5, 0xffff
+; CI-NEXT: s_add_i32 s4, s4, 2
+; CI-NEXT: s_or_b32 s5, s6, s5
+; CI-NEXT: s_lshl_b32 s6, s7, 16
+; CI-NEXT: s_and_b32 s4, s4, 0xffff
+; CI-NEXT: s_or_b32 s4, s6, s4
+; CI-NEXT: s_add_i32 s5, s5, 0x20000
+; CI-NEXT: s_add_i32 s4, s4, 0x20000
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: v_mov_b32_e32 v0, s4
+; CI-NEXT: v_mov_b32_e32 v1, s5
+; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; CI-NEXT: s_endpgm
%z0 = insertelement <4 x i16> poison, i16 0, i16 0
%z1 = insertelement <4 x i16> %z0, i16 0, i16 1
diff --git a/llvm/test/CodeGen/AMDGPU/sub.ll b/llvm/test/CodeGen/AMDGPU/sub.ll
index 5c113d8..0a51601 100644
--- a/llvm/test/CodeGen/AMDGPU/sub.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub.ll
@@ -11,14 +11,13 @@ define amdgpu_kernel void @s_sub_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
; GFX6-LABEL: s_sub_i32:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b32 s7, 0xf000
-; GFX6-NEXT: s_mov_b32 s6, -1
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: s_mov_b32 s4, s0
-; GFX6-NEXT: s_sub_i32 s0, s2, s3
-; GFX6-NEXT: s_mov_b32 s5, s1
-; GFX6-NEXT: v_mov_b32_e32 v0, s0
-; GFX6-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GFX6-NEXT: s_sub_i32 s4, s4, s5
+; GFX6-NEXT: s_mov_b32 s3, 0xf000
+; GFX6-NEXT: s_mov_b32 s2, -1
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX6-NEXT: s_endpgm
;
; GFX8-LABEL: s_sub_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
index 6a273e5..82ef28f 100644
--- a/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/sub.v2i16.ll
@@ -223,44 +223,39 @@ define amdgpu_kernel void @s_test_sub_v2i16_kernarg(ptr addrspace(1) %out, <2 x
; VI-LABEL: s_test_sub_v2i16_kernarg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: s_mov_b32 s4, s0
-; VI-NEXT: s_mov_b32 s5, s1
-; VI-NEXT: s_lshr_b32 s0, s3, 16
-; VI-NEXT: s_lshr_b32 s1, s2, 16
-; VI-NEXT: s_sub_i32 s0, s1, s0
-; VI-NEXT: s_sub_i32 s1, s2, s3
-; VI-NEXT: s_lshl_b32 s0, s0, 16
-; VI-NEXT: s_and_b32 s1, s1, 0xffff
-; VI-NEXT: s_or_b32 s0, s1, s0
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: s_lshr_b32 s6, s5, 16
+; VI-NEXT: s_lshr_b32 s7, s4, 16
+; VI-NEXT: s_sub_i32 s4, s4, s5
+; VI-NEXT: s_sub_i32 s5, s7, s6
+; VI-NEXT: s_lshl_b32 s5, s5, 16
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX10-LABEL: s_test_sub_v2i16_kernarg:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-NEXT: s_mov_b32 s7, 0x31016000
-; GFX10-NEXT: s_mov_b32 s6, -1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_pk_sub_i16 v0, s2, s3
-; GFX10-NEXT: s_mov_b32 s4, s0
-; GFX10-NEXT: s_mov_b32 s5, s1
-; GFX10-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX10-NEXT: s_mov_b32 s3, 0x31016000
+; GFX10-NEXT: s_mov_b32 s2, -1
+; GFX10-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: s_test_sub_v2i16_kernarg:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-NEXT: s_mov_b32 s7, 0x31016000
-; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_pk_sub_i16 v0, s2, s3
-; GFX11-NEXT: s_mov_b32 s4, s0
-; GFX11-NEXT: s_mov_b32 s5, s1
-; GFX11-NEXT: buffer_store_b32 v0, off, s[4:7], 0
+; GFX11-NEXT: s_mov_b32 s3, 0x31016000
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: buffer_store_b32 v0, off, s[0:3], 0
; GFX11-NEXT: s_endpgm
%add = sub <2 x i16> %a, %b
store <2 x i16> %add, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/udiv.ll b/llvm/test/CodeGen/AMDGPU/udiv.ll
index 063c56f..1f93bf7 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv.ll
@@ -189,67 +189,65 @@ define amdgpu_kernel void @s_udiv_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
; SI-LABEL: s_udiv_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cvt_f32_u32_e32 v0, s3
-; SI-NEXT: s_sub_i32 s4, 0, s3
-; SI-NEXT: s_mov_b32 s5, s1
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; SI-NEXT: s_sub_i32 s2, 0, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: v_rcp_iflag_f32_e32 v0, v0
; SI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; SI-NEXT: v_cvt_u32_f32_e32 v0, v0
-; SI-NEXT: v_mul_lo_u32 v1, s4, v0
-; SI-NEXT: s_mov_b32 s4, s0
+; SI-NEXT: v_mul_lo_u32 v1, s2, v0
+; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mul_hi_u32 v1, v0, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; SI-NEXT: v_mul_hi_u32 v0, s2, v0
-; SI-NEXT: v_readfirstlane_b32 s0, v0
-; SI-NEXT: s_mul_i32 s0, s0, s3
-; SI-NEXT: s_sub_i32 s0, s2, s0
-; SI-NEXT: s_sub_i32 s1, s0, s3
+; SI-NEXT: v_mul_hi_u32 v0, s4, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: s_mul_i32 s6, s6, s5
+; SI-NEXT: s_sub_i32 s4, s4, s6
+; SI-NEXT: s_sub_i32 s6, s4, s5
; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; SI-NEXT: s_cmp_ge_u32 s0, s3
+; SI-NEXT: s_cmp_ge_u32 s4, s5
; SI-NEXT: s_cselect_b64 vcc, -1, 0
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT: s_cselect_b32 s0, s1, s0
+; SI-NEXT: s_cselect_b32 s4, s6, s4
; SI-NEXT: v_add_i32_e32 v1, vcc, 1, v0
-; SI-NEXT: s_cmp_ge_u32 s0, s3
+; SI-NEXT: s_cmp_ge_u32 s4, s5
; SI-NEXT: s_cselect_b64 vcc, -1, 0
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_udiv_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; VI-NEXT: s_mov_b32 s7, 0xf000
-; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_cvt_f32_u32_e32 v0, s3
-; VI-NEXT: s_sub_i32 s4, 0, s3
-; VI-NEXT: s_mov_b32 s5, s1
+; VI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; VI-NEXT: v_cvt_f32_u32_e32 v0, s5
+; VI-NEXT: s_sub_i32 s2, 0, s5
+; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: v_rcp_iflag_f32_e32 v0, v0
; VI-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
; VI-NEXT: v_cvt_u32_f32_e32 v0, v0
-; VI-NEXT: v_mul_lo_u32 v1, s4, v0
-; VI-NEXT: s_mov_b32 s4, s0
+; VI-NEXT: v_mul_lo_u32 v1, s2, v0
+; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mul_hi_u32 v1, v0, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v1
-; VI-NEXT: v_mul_hi_u32 v0, s2, v0
-; VI-NEXT: v_readfirstlane_b32 s0, v0
-; VI-NEXT: s_mul_i32 s0, s0, s3
-; VI-NEXT: s_sub_i32 s0, s2, s0
-; VI-NEXT: s_sub_i32 s1, s0, s3
+; VI-NEXT: v_mul_hi_u32 v0, s4, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v0
+; VI-NEXT: s_mul_i32 s6, s6, s5
+; VI-NEXT: s_sub_i32 s4, s4, s6
+; VI-NEXT: s_sub_i32 s6, s4, s5
; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
-; VI-NEXT: s_cmp_ge_u32 s0, s3
+; VI-NEXT: s_cmp_ge_u32 s4, s5
; VI-NEXT: s_cselect_b64 vcc, -1, 0
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; VI-NEXT: s_cselect_b32 s0, s1, s0
+; VI-NEXT: s_cselect_b32 s4, s6, s4
; VI-NEXT: v_add_u32_e32 v1, vcc, 1, v0
-; VI-NEXT: s_cmp_ge_u32 s0, s3
+; VI-NEXT: s_cmp_ge_u32 s4, s5
; VI-NEXT: s_cselect_b64 vcc, -1, 0
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
-; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GCN-LABEL: s_udiv_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 775483c..1c50f93 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -716,8 +716,6 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_and_b32 s3, s3, 0xffff
@@ -729,25 +727,23 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 24
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s2
; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_mov_b32 s5, s1
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
; GCN-NEXT: v_mul_f32_e32 v2, v1, v2
; GCN-NEXT: v_trunc_f32_e32 v2, v2
+; GCN-NEXT: v_cvt_u32_f32_e32 v4, v2
; GCN-NEXT: v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT: v_cvt_u32_f32_e32 v2, v2
; GCN-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
-; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT: v_addc_u32_e32 v0, vcc, 0, v4, vcc
; GCN-NEXT: v_and_b32_e32 v0, 0xffffff, v0
-; GCN-NEXT: buffer_store_short v3, off, s[4:7], 0 offset:4
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: buffer_store_short v3, off, s[0:3], 0 offset:4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_udiv24_i48:
; GCN-IR: ; %bb.0:
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
-; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
-; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_and_b32 s3, s3, 0xffff
@@ -759,17 +755,17 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], 24
; GCN-IR-NEXT: v_cvt_f32_u32_e32 v1, s2
; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT: s_mov_b32 s4, s0
-; GCN-IR-NEXT: s_mov_b32 s5, s1
+; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s2, -1
; GCN-IR-NEXT: v_mul_f32_e32 v2, v1, v2
; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT: v_cvt_u32_f32_e32 v4, v2
; GCN-IR-NEXT: v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT: v_cvt_u32_f32_e32 v2, v2
; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v1|, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v0, vcc, 0, v4, vcc
; GCN-IR-NEXT: v_and_b32_e32 v0, 0xffffff, v0
-; GCN-IR-NEXT: buffer_store_short v3, off, s[4:7], 0 offset:4
-; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-IR-NEXT: buffer_store_short v3, off, s[0:3], 0 offset:4
+; GCN-IR-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-IR-NEXT: s_endpgm
%1 = lshr i48 %x, 24
%2 = lshr i48 %y, 24
diff --git a/llvm/test/CodeGen/AMDGPU/while-break.ll b/llvm/test/CodeGen/AMDGPU/while-break.ll
index 19c8e84..2b7e283 100644
--- a/llvm/test/CodeGen/AMDGPU/while-break.ll
+++ b/llvm/test/CodeGen/AMDGPU/while-break.ll
@@ -157,8 +157,8 @@ define amdgpu_ps < 2 x float> @while_break_two_chains_of_phi(float %v, i32 %x, i
; GCN-LABEL: while_break_two_chains_of_phi:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: v_mov_b32_e32 v6, 0
-; GCN-NEXT: s_mov_b32 s2, 0
; GCN-NEXT: s_mov_b32 s0, 0
+; GCN-NEXT: s_mov_b32 s2, 0
; GCN-NEXT: s_branch .LBB2_2
; GCN-NEXT: .LBB2_1: ; %Flow1
; GCN-NEXT: ; in Loop: Header=BB2_2 Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/xor.ll b/llvm/test/CodeGen/AMDGPU/xor.ll
index feb6ecd..92280b9 100644
--- a/llvm/test/CodeGen/AMDGPU/xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/xor.ll
@@ -298,14 +298,13 @@ define amdgpu_kernel void @scalar_xor_i32(ptr addrspace(1) %out, i32 %a, i32 %b)
; SI-LABEL: scalar_xor_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_xor_b32 s0, s2, s3
-; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: s_xor_b32 s4, s4, s5
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: scalar_xor_i32:
diff --git a/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll b/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll
index c393582..d9f5ba9 100644
--- a/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll
+++ b/llvm/test/CodeGen/AMDGPU/zext-divergence-driven-isel.ll
@@ -5,15 +5,14 @@ define amdgpu_kernel void @zext_i16_to_i32_uniform(ptr addrspace(1) %out, i16 %a
; GCN-LABEL: zext_i16_to_i32_uniform:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT: s_mov_b32 s7, 0xf000
-; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_mov_b32 s4, s0
-; GCN-NEXT: s_and_b32 s0, s2, 0xffff
-; GCN-NEXT: s_add_i32 s0, s3, s0
-; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_mov_b64 s[4:5], s[2:3]
+; GCN-NEXT: s_and_b32 s4, s4, 0xffff
+; GCN-NEXT: s_add_i32 s4, s5, s4
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT: s_endpgm
%zext = zext i16 %a to i32
%res = add i32 %b, %zext
diff --git a/llvm/test/CodeGen/ARM/llvm.sincospi.ll b/llvm/test/CodeGen/ARM/llvm.sincospi.ll
new file mode 100644
index 0000000..91bf0aa
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/llvm.sincospi.ll
@@ -0,0 +1,249 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=thumbv7-apple-ios7.0.0 < %s | FileCheck %s
+
+define { half, half } @test_sincospi_f16(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: mov r1, r0
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: pop {r4, pc}
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ ret { half, half } %result
+}
+
+define half @test_sincospi_f16_only_use_sin(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16_only_use_sin:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: str lr, [sp, #-4]!
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: ldr lr, [sp], #4
+; CHECK-NEXT: bx lr
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ %result.0 = extractvalue { half, half } %result, 0
+ ret half %result.0
+}
+
+define half @test_sincospi_f16_only_use_cos(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16_only_use_cos:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: str lr, [sp, #-4]!
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: add sp, #8
+; CHECK-NEXT: ldr lr, [sp], #4
+; CHECK-NEXT: bx lr
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ %result.1 = extractvalue { half, half } %result, 1
+ ret half %result.1
+}
+
+define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f16:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: sub sp, #24
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #12
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: bl ___extendhfsf2
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: ldr r1, [sp, #4]
+; CHECK-NEXT: strh.w r0, [sp, #22]
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: strh.w r0, [sp, #20]
+; CHECK-NEXT: add r0, sp, #20
+; CHECK-NEXT: vld1.32 {d8[0]}, [r0:32]
+; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: ldr r1, [sp]
+; CHECK-NEXT: strh.w r0, [sp, #18]
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: bl ___truncsfhf2
+; CHECK-NEXT: strh.w r0, [sp, #16]
+; CHECK-NEXT: add r0, sp, #16
+; CHECK-NEXT: vmovl.u16 q9, d8
+; CHECK-NEXT: vld1.32 {d16[0]}, [r0:32]
+; CHECK-NEXT: vmovl.u16 q8, d16
+; CHECK-NEXT: vmov.32 r0, d18[0]
+; CHECK-NEXT: vmov.32 r1, d18[1]
+; CHECK-NEXT: vmov.32 r2, d16[0]
+; CHECK-NEXT: vmov.32 r3, d16[1]
+; CHECK-NEXT: add sp, #24
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: pop {r4, pc}
+ %result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a)
+ ret { <2 x half>, <2 x half> } %result
+}
+
+define { float, float } @test_sincospi_f32(float %a) #0 {
+; CHECK-LABEL: test_sincospi_f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: str lr, [sp, #-4]!
+; CHECK-NEXT: sub sp, #8
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldrd r1, r0, [sp], #8
+; CHECK-NEXT: ldr lr, [sp], #4
+; CHECK-NEXT: bx lr
+ %result = call { float, float } @llvm.sincospi.f32(float %a)
+ ret { float, float } %result
+}
+
+define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: str lr, [sp, #-4]!
+; CHECK-NEXT: vpush {d8}
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: vmov d8, r0, r1
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: vmov r0, s17
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: vmov r0, s16
+; CHECK-NEXT: add r1, sp, #12
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: vldr s1, [sp, #4]
+; CHECK-NEXT: vldr s3, [sp]
+; CHECK-NEXT: vldr s0, [sp, #12]
+; CHECK-NEXT: vldr s2, [sp, #8]
+; CHECK-NEXT: vmov r0, r1, d0
+; CHECK-NEXT: vmov r2, r3, d1
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: vpop {d8}
+; CHECK-NEXT: ldr lr, [sp], #4
+; CHECK-NEXT: bx lr
+ %result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a)
+ ret { <2 x float>, <2 x float> } %result
+}
+
+define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 {
+; CHECK-LABEL: test_sincospi_v3f32:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, r5, r6, r7, lr}
+; CHECK-NEXT: sub sp, #16
+; CHECK-NEXT: mov r6, r2
+; CHECK-NEXT: mov r7, r1
+; CHECK-NEXT: add r1, sp, #12
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r6
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: add r1, sp, #4
+; CHECK-NEXT: mov r2, sp
+; CHECK-NEXT: mov r0, r7
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: ldr r0, [sp, #36]
+; CHECK-NEXT: vmov d0, r7, r6
+; CHECK-NEXT: mov r1, r4
+; CHECK-NEXT: add.w r2, r4, #16
+; CHECK-NEXT: vmov d1, r5, r0
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vldr s1, [sp, #8]
+; CHECK-NEXT: vldr s3, [sp, #12]
+; CHECK-NEXT: vldr s2, [sp, #4]
+; CHECK-NEXT: vldr s0, [sp]
+; CHECK-NEXT: vst1.32 {d1}, [r1:64]!
+; CHECK-NEXT: vst1.32 {d0}, [r2:64]!
+; CHECK-NEXT: bl ___sincospif
+; CHECK-NEXT: add sp, #16
+; CHECK-NEXT: pop {r4, r5, r6, r7, pc}
+ %result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a)
+ ret { <3 x float>, <3 x float> } %result
+}
+
+define { double, double } @test_sincospi_f64(double %a) #0 {
+; CHECK-LABEL: test_sincospi_f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push {r4, r7, lr}
+; CHECK-NEXT: add r7, sp, #4
+; CHECK-NEXT: sub sp, #20
+; CHECK-NEXT: mov r4, sp
+; CHECK-NEXT: bfc r4, #0, #3
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r3, sp
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: subs r4, r7, #4
+; CHECK-NEXT: ldrd r0, r1, [sp, #8]
+; CHECK-NEXT: ldrd r2, r3, [sp]
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: pop {r4, r7, pc}
+ %result = call { double, double } @llvm.sincospi.f64(double %a)
+ ret { double, double } %result
+}
+
+define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f64:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT: add r7, sp, #16
+; CHECK-NEXT: sub sp, #32
+; CHECK-NEXT: mov r4, sp
+; CHECK-NEXT: bfc r4, #0, #3
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: mov r6, r1
+; CHECK-NEXT: ldr r1, [r7, #8]
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: mov r8, r2
+; CHECK-NEXT: add r2, sp, #24
+; CHECK-NEXT: add r3, sp, #16
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: add r2, sp, #8
+; CHECK-NEXT: mov r3, sp
+; CHECK-NEXT: mov r0, r6
+; CHECK-NEXT: mov r1, r8
+; CHECK-NEXT: bl ___sincospi
+; CHECK-NEXT: vldr d19, [sp, #24]
+; CHECK-NEXT: vldr d18, [sp, #8]
+; CHECK-NEXT: vldr d17, [sp, #16]
+; CHECK-NEXT: vldr d16, [sp]
+; CHECK-NEXT: vst1.32 {d18, d19}, [r4]!
+; CHECK-NEXT: vst1.32 {d16, d17}, [r4]
+; CHECK-NEXT: sub.w r4, r7, #16
+; CHECK-NEXT: mov sp, r4
+; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
+ %result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-struct.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-struct.ll
new file mode 100644
index 0000000..22fba8c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-struct.ll
@@ -0,0 +1,59 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+;
+; Tests for indexing into arrays of structs in cbuffers.
+;
+; struct S {
+; float x[2];
+; uint q;
+; };
+; cbuffer CB : register(b0) {
+; uint32_t3 w[3]; // offset 0, size 12 (+4) * 3
+; S v[3]; // offset 48, size 24 (+8) * 3
+; }
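+;
+; cbuffer rows are 16 bytes, so a byte offset N maps to row N / 16 and i32
+; element (N % 16) / 4: w[2].z at byte 40 is row 2, element 2, and v[2].q at
+; byte 48 + 84 = 132 is row 8, element 1, as the CHECK lines below expect.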
+%S = type <{ <{ [1 x <{ float, target("dx.Padding", 12) }>], float }>, i32 }>
+%__cblayout_CB = type <{
+ <{
+ [2 x <{ <3 x i32>, target("dx.Padding", 4) }>],
+ <3 x i32>
+ }>,
+ target("dx.Padding", 4),
+ <{
+ [2 x <{ %S, target("dx.Padding", 8) }>], %S
+ }>
+}>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst, i32 %idx) {
+entry:
+ %CB.cb_h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefromimplicitbinding(i32 1, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; w[2].z
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 2)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: store i32 [[X]], ptr %dst
+ %w_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %w_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %w_ptr, i32 40
+ %w_load = load i32, ptr addrspace(2) %w_gep, align 4
+ store i32 %w_load, ptr %dst, align 4
+
+ ;; v[2].q
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 8)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ ; CHECK: store i32 [[X]], ptr [[PTR]]
+ %v_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 48)
+ %v_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %v_ptr, i32 84
+ %v_load = load i32, ptr addrspace(2) %v_gep, align 4
+ %v.i = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ store i32 %v_load, ptr %v.i, align 4
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-vector.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-vector.ll
new file mode 100644
index 0000000..615fc5e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-of-vector.ll
@@ -0,0 +1,49 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+;
+; Test for when we have indices into both the array and the vector, i.e., s[1][3].
+
+; cbuffer CB : register(b0) {
+; uint4 s[3]; // offset 0, size 16 * 3
+; }
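+;
+; With 16-byte cbuffer rows, s[1][3] at byte 16 + 12 = 28 is row 1, i32
+; element 3, and s[2].w is row 2, element 3, matching the CHECK lines below.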
+%__cblayout_CB = type <{ [3 x <4 x i32>] }>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefromimplicitbinding(i32 1, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; s[1][3]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 3
+ ; CHECK: store i32 [[X]], ptr %dst
+ %i8_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %i8_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %i8_ptr, i32 28
+ %i8_vecext = load i32, ptr addrspace(2) %i8_gep, align 4
+ store i32 %i8_vecext, ptr %dst, align 4
+
+ ;; s[2].w
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 2)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 3
+ ;;
+ ;; It would be nice to avoid the redundant vector creation here, but that's
+ ;; outside the scope of this pass.
+ ;;
+ ; CHECK: [[X_VEC:%.*]] = insertelement <4 x i32> {{%.*}}, i32 [[X]], i32 3
+ ; CHECK: [[X_EXT:%.*]] = extractelement <4 x i32> [[X_VEC]], i32 3
+ ; CHECK: store i32 [[X_EXT]], ptr %dst
+ %typed_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %typed_gep = getelementptr <4 x i32>, ptr addrspace(2) %typed_ptr, i32 2
+ %typed_load = load <4 x i32>, ptr addrspace(2) %typed_gep, align 16
+ %typed_vecext = extractelement <4 x i32> %typed_load, i32 3
+ store i32 %typed_vecext, ptr %dst, align 4
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-typedgep.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-typedgep.ll
new file mode 100644
index 0000000..eabc07c
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-array-typedgep.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+
+; cbuffer CB : register(b0) {
+; float a1[3];
+; }
+%__cblayout_CB = type <{ [2 x <{ float, [12 x i8] }>], float }>
+
+@CB.cb = global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h = call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ;; a1[1]
+ ;; Note that the valid GEPs of a1 are `0, 0, 0`, `0, 0, 1`, and `0, 1`.
+ ;
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ ; CHECK: [[LOAD:%.*]] = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[X:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 0
+ ; CHECK: store float [[X]], ptr %dst
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 8
+ %a1_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %a1_gep = getelementptr inbounds <{ [2 x <{ float, [12 x i8] }>], float }>, ptr addrspace(2) %a1_ptr, i32 0, i32 0, i32 1
+ %a1 = load float, ptr addrspace(2) %a1_gep, align 4
+ store float %a1, ptr %dst, align 32
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-arrays.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-arrays.ll
new file mode 100644
index 0000000..6f6166e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-arrays.ll
@@ -0,0 +1,145 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+
+; cbuffer CB : register(b0) {
+; float a1[3]; // offset 0, size 4 (+12) * 3
+; double3 a2[2]; // offset 48, size 24 (+8) * 2
+; float16_t a3[2][2]; // offset 112, size 2 (+14) * 4
+; uint64_t a4[3]; // offset 176, size 8 (+8) * 3
+; int4 a5[2][3][4]; // offset 224, size 16 * 24
+; uint16_t a6[1]; // offset 608, size 2 (+14) * 1
+; int64_t a7[2]; // offset 624, size 8 (+8) * 2
+; bool a8[4]; // offset 656, size 4 (+12) * 4
+; }
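+;
+; The row indices in the CHECK lines follow from the 16-byte cbuffer rows:
+; for example, a2[1] at byte 48 + 32 = 80 is row 5, and a5[1][0][0] at byte
+; 224 + 192 = 416 is row 26.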
+%__cblayout_CB = type <{
+ <{ [2 x <{ float, target("dx.Padding", 12) }>], float }>, target("dx.Padding", 12),
+ <{ [1 x <{ <3 x double>, target("dx.Padding", 8) }>], <3 x double> }>, target("dx.Padding", 8),
+ <{ [3 x <{ half, target("dx.Padding", 14) }>], half }>, target("dx.Padding", 14),
+ <{ [2 x <{ i64, target("dx.Padding", 8) }>], i64 }>, target("dx.Padding", 8),
+ [24 x <4 x i32>],
+ [1 x i16], target("dx.Padding", 14),
+ <{ [1 x <{ i64, target("dx.Padding", 8) }>], i64 }>, target("dx.Padding", 8),
+ <{ [3 x <{ i32, target("dx.Padding", 12) }>], i32 }>
+}>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h.i.i = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h.i.i, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; a1[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[X:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 0
+ ; CHECK: store float [[X]], ptr %dst
+ %a1_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %a1_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a1_ptr, i32 16
+ %a1 = load float, ptr addrspace(2) %a1_gep, align 4
+ store float %a1, ptr %dst, align 32
+
+ ;; a2[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 5)
+ ; CHECK: [[X:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { double, double } [[LOAD]], 1
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 6)
+ ; CHECK: [[Z:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x double> poison, double [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x double> [[VEC0]], double [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x double> [[VEC1]], double [[Z]], i32 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 8
+ ; CHECK: store <3 x double> [[VEC2]], ptr [[PTR]]
+ %a2_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 48)
+ %a2_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a2_ptr, i32 32
+ %a2 = load <3 x double>, ptr addrspace(2) %a2_gep, align 8
+ %a2.i = getelementptr inbounds nuw i8, ptr %dst, i32 8
+ store <3 x double> %a2, ptr %a2.i, align 32
+
+ ;; a3[0][1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { half, half, half, half, half, half, half, half } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 8)
+ ; CHECK: [[X:%.*]] = extractvalue { half, half, half, half, half, half, half, half } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 32
+ ; CHECK: store half [[X]], ptr [[PTR]]
+ %a3_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 112)
+ %a3_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a3_ptr, i32 16
+ %a3 = load half, ptr addrspace(2) %a3_gep, align 2
+ %a3.i = getelementptr inbounds nuw i8, ptr %dst, i32 32
+ store half %a3, ptr %a3.i, align 2
+
+ ;; a4[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 12)
+ ; CHECK: [[X:%.*]] = extractvalue { i64, i64 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 40
+ ; CHECK: store i64 [[X]], ptr [[PTR]]
+ %a4_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 176)
+ %a4_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a4_ptr, i32 16
+ %a4 = load i64, ptr addrspace(2) %a4_gep, align 8
+ %a4.i = getelementptr inbounds nuw i8, ptr %dst, i32 40
+ store i64 %a4, ptr %a4.i, align 8
+
+ ;; a5[1][0][0]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 26)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[Z:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: [[A:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 3
+ ; CHECK: [[VEC0:%.*]] = insertelement <4 x i32> poison, i32 [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <4 x i32> [[VEC0]], i32 [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <4 x i32> [[VEC1]], i32 [[Z]], i32 2
+ ; CHECK: [[VEC3:%.*]] = insertelement <4 x i32> [[VEC2]], i32 [[A]], i32 3
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 48
+ ; CHECK: store <4 x i32> [[VEC3]], ptr [[PTR]]
+ %a5_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 224)
+ %a5_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a5_ptr, i32 192
+ %a5 = load <4 x i32>, ptr addrspace(2) %a5_gep, align 4
+ %a5.i = getelementptr inbounds nuw i8, ptr %dst, i32 48
+ store <4 x i32> %a5, ptr %a5.i, align 4
+
+ ;; a6[0]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i16, i16, i16, i16, i16, i16, i16, i16 } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 38)
+ ; CHECK: [[X:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 64
+ ; CHECK: store i16 [[X]], ptr [[PTR]]
+ %a6_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 608)
+ %a6 = load i16, ptr addrspace(2) %a6_ptr, align 2
+ %a6.i = getelementptr inbounds nuw i8, ptr %dst, i32 64
+ store i16 %a6, ptr %a6.i, align 2
+
+ ;; a7[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 40)
+ ; CHECK: [[X:%.*]] = extractvalue { i64, i64 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 72
+ ; CHECK: store i64 [[X]], ptr [[PTR]]
+ %a7_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 624)
+ %a7_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a7_ptr, i32 16
+ %a7 = load i64, ptr addrspace(2) %a7_gep, align 8
+ %a7.i = getelementptr inbounds nuw i8, ptr %dst, i32 72
+ store i64 %a7, ptr %a7.i, align 8
+
+ ;; a8[1]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 42)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 80
+ ; CHECK: store i32 [[X]], ptr [[PTR]]
+ %a8_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 656)
+ %a8_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %a8_ptr, i32 16
+ %a8 = load i32, ptr addrspace(2) %a8_gep, align 4, !range !0, !noundef !1
+ %a8.i = getelementptr inbounds nuw i8, ptr %dst, i32 80
+ store i32 %a8, ptr %a8.i, align 4
+
+ ret void
+}
+
+!0 = !{i32 0, i32 2}
+!1 = !{}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic-struct.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic-struct.ll
new file mode 100644
index 0000000..22994cf
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic-struct.ll
@@ -0,0 +1,64 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+;
+; Tests for indexed types in dynamically indexed arrays in cbuffers.
+;
+; Bug https://github.com/llvm/llvm-project/issues/164517
+; XFAIL: *
+;
+; struct S {
+; float x[2];
+; uint q;
+; };
+; cbuffer CB : register(b0) {
+; uint32_t3 w[3]; // offset 0, size 12 (+4) * 3
+; S v[3]; // offset 48, size 24 (+8) * 3
+; }
+%S = type <{ <{ [1 x <{ float, target("dx.Padding", 12) }>], float }>, i32 }>
+%__cblayout_CB = type <{
+ <{
+ [2 x <{ <3 x i32>, target("dx.Padding", 4) }>],
+ <3 x i32>
+ }>,
+ target("dx.Padding", 4),
+ <{
+ [2 x <{ %S, target("dx.Padding", 8) }>], %S
+ }>
+}>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst, i32 %idx) {
+entry:
+ %CB.cb_h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefromimplicitbinding(i32 1, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; w[idx].z
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 %idx)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: store i32 [[X]], ptr %dst
+ %w_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %w_arrayidx = getelementptr <3 x i32>, ptr addrspace(2) %w_ptr, i32 %idx
+ %w_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %w_arrayidx, i32 4
+ %w_load = load i32, ptr addrspace(2) %w_gep, align 4
+ store i32 %w_load, ptr %dst, align 4
+
+ ;; v[idx].q
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 %idx)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ ; CHECK: store i32 [[X]], ptr [[PTR]]
+ %v_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 48)
+  %v_arrayidx = getelementptr <{ %S, target("dx.Padding", 4) }>, ptr addrspace(2) %v_ptr, i32 %idx
+ %v_gep = getelementptr inbounds nuw i8, ptr addrspace(2) %v_arrayidx, i32 8
+ %v_load = load i32, ptr addrspace(2) %v_gep, align 4
+ %v.i = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ store i32 %v_load, ptr %v.i, align 4
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic.ll
new file mode 100644
index 0000000..7daebaed
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-dynamic.ll
@@ -0,0 +1,46 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+;
+; Tests for dynamic indices into arrays in cbuffers.
+
+; cbuffer CB : register(b0) {
+; uint s[10]; // offset 0, size 4 (+12) * 10
+; uint t[12]; // offset 160, size 4 (+12) * 12
+; }
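+;
+; Both arrays have a 16-byte element stride, so a dynamic index maps directly
+; to a row index: s[idx] is row idx, and t[idx], whose base offset 160 is row
+; 10, becomes row 10 + idx, as checked below.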
+%__cblayout_CB = type <{ <{ [9 x <{ i32, target("dx.Padding", 12) }>], i32 }>, target("dx.Padding", 12), <{ [11 x <{ i32, target("dx.Padding", 12) }>], i32 }> }>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst, i32 %idx) {
+entry:
+ %CB.cb_h = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefromimplicitbinding(i32 1, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 4
+
+ ;; s[idx]
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 %idx)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: store i32 [[X]], ptr %dst
+ %s_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %s_gep = getelementptr <{ i32, target("dx.Padding", 12) }>, ptr addrspace(2) %s_ptr, i32 %idx
+ %s_load = load i32, ptr addrspace(2) %s_gep, align 4
+ store i32 %s_load, ptr %dst, align 4
+
+ ;; t[idx]
+ ;
+ ; CHECK: [[T_IDX:%.*]] = add i32 10, %idx
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 [[T_IDX]])
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ ; CHECK: store i32 [[X]], ptr [[PTR]]
+ %t_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 160)
+ %t_gep = getelementptr <{ i32, target("dx.Padding", 12) }>, ptr addrspace(2) %t_ptr, i32 %idx
+ %t_load = load i32, ptr addrspace(2) %t_gep, align 4
+ %t.i = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ store i32 %t_load, ptr %t.i, align 4
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-scalars.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-scalars.ll
new file mode 100644
index 0000000..65c9a3e
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-scalars.ll
@@ -0,0 +1,101 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+
+; cbuffer CB {
+; float a1; // offset 0, size 4
+; int a2; // offset 4, size 4
+; bool a3; // offset 8, size 4
+; float16_t a4; // offset 12, size 2
+; uint16_t a5; // offset 14, size 2
+; double a6; // offset 16, size 8
+; int64_t a7; // offset 24, size 8
+; }
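+;
+; a1 through a5 pack into the 16-byte cbuffer row 0 and a6/a7 into row 1; the
+; extractvalue index is the byte offset within the row divided by the element
+; size, e.g. a4 at offset 12 is element 12 / 2 = 6 of the 8 x half row view.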
+%__cblayout_CB = type <{ float, i32, i32, half, i16, double, i64 }>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h.i.i = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h.i.i, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 8
+
+ ;; a1
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A1:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 0
+ ; CHECK: store float [[A1]], ptr %dst
+ %a1_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %a1 = load float, ptr addrspace(2) %a1_ptr, align 4
+ store float %a1, ptr %dst, align 8
+
+ ;; a2
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A2:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ ; CHECK: store i32 [[A2]], ptr [[PTR]]
+ %a2_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 4)
+ %a2 = load i32, ptr addrspace(2) %a2_ptr, align 4
+ %a2.i = getelementptr inbounds nuw i8, ptr %dst, i32 4
+ store i32 %a2, ptr %a2.i, align 8
+
+ ;; a3
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A3:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 8
+ ; CHECK: store i32 [[A3]], ptr [[PTR]]
+ %a3_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 8)
+ %a3 = load i32, ptr addrspace(2) %a3_ptr, align 4
+ %a3.i = getelementptr inbounds nuw i8, ptr %dst, i32 8
+ store i32 %a3, ptr %a3.i, align 4
+
+ ;; a4
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { half, half, half, half, half, half, half, half } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A4:%.*]] = extractvalue { half, half, half, half, half, half, half, half } [[LOAD]], 6
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 12
+ ; CHECK: store half [[A4]], ptr [[PTR]]
+ %a4_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 12)
+ %a4 = load half, ptr addrspace(2) %a4_ptr, align 2
+ %a4.i = getelementptr inbounds nuw i8, ptr %dst, i32 12
+ store half %a4, ptr %a4.i, align 4
+
+ ;; a5
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i16, i16, i16, i16, i16, i16, i16, i16 } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[A5:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 7
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 14
+ ; CHECK: store i16 [[A5]], ptr [[PTR]]
+ %a5_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 14)
+ %a5 = load i16, ptr addrspace(2) %a5_ptr, align 2
+ %a5.i = getelementptr inbounds nuw i8, ptr %dst, i32 14
+ store i16 %a5, ptr %a5.i, align 2
+
+ ;; a6
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[A6:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 16
+ ; CHECK: store double [[A6]], ptr [[PTR]]
+ %a6_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 16)
+ %a6 = load double, ptr addrspace(2) %a6_ptr, align 8
+ %a6.i = getelementptr inbounds nuw i8, ptr %dst, i32 16
+ store double %a6, ptr %a6.i, align 8
+
+ ;; a7
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[A7:%.*]] = extractvalue { i64, i64 } [[LOAD]], 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 24
+ ; CHECK: store i64 [[A7]], ptr [[PTR]]
+ %a7_ptr = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 24)
+ %a7 = load i64, ptr addrspace(2) %a7_ptr, align 8
+ %a7.i = getelementptr inbounds nuw i8, ptr %dst, i32 24
+ store i64 %a7, ptr %a7.i, align 8
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-vectors.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-vectors.ll
new file mode 100644
index 0000000..0156a1a
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load-cbuffer-vectors.ll
@@ -0,0 +1,121 @@
+; RUN: opt -S -dxil-resource-access -mtriple=dxil %s | FileCheck %s
+
+; cbuffer CB {
+; float3 a1; // offset 0, size 12 (+4)
+; double3 a2; // offset 16, size 24
+; float16_t2 a3; // offset 40, size 4 (+4)
+; uint64_t3 a4; // offset 48, size 24 (+8)
+; int4 a5; // offset 80, size 16
+; uint16_t3 a6; // offset 96, size 6
+; };
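+;
+; double3 a2 (offset 16, size 24) straddles two 16-byte rows, so it is
+; reassembled from two cbufferrow.2 loads of rows 1 and 2; uint64_t3 a4
+; similarly spans rows 3 and 4.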
+%__cblayout_CB = type <{ <3 x float>, target("dx.Padding", 4), <3 x double>, <2 x half>, target("dx.Padding", 4), <3 x i64>, target("dx.Padding", 8), <4 x i32>, <3 x i16> }>
+
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+
+; CHECK: define void @f
+define void @f(ptr %dst) {
+entry:
+ %CB.cb_h.i.i = tail call target("dx.CBuffer", %__cblayout_CB) @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, ptr null)
+ store target("dx.CBuffer", %__cblayout_CB) %CB.cb_h.i.i, ptr @CB.cb, align 4
+
+ ; CHECK: [[CB:%.*]] = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb
+ %CB.cb = load target("dx.CBuffer", %__cblayout_CB), ptr @CB.cb, align 8
+
+ ;; a1
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { float, float, float, float } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 0)
+ ; CHECK: [[X:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 1
+ ; CHECK: [[Z:%.*]] = extractvalue { float, float, float, float } [[LOAD]], 2
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x float> poison, float [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x float> [[VEC0]], float [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x float> [[VEC1]], float [[Z]], i32 2
+ ; CHECK: store <3 x float> [[VEC2]], ptr %dst
+ %a1_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 0)
+ %a1 = load <3 x float>, ptr addrspace(2) %a1_gep, align 16
+ store <3 x float> %a1, ptr %dst, align 4
+
+ ;; a2
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 1)
+ ; CHECK: [[X:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { double, double } [[LOAD]], 1
+ ; CHECK: [[LOAD:%.*]] = call { double, double } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 2)
+ ; CHECK: [[Z:%.*]] = extractvalue { double, double } [[LOAD]], 0
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x double> poison, double [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x double> [[VEC0]], double [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x double> [[VEC1]], double [[Z]], i32 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 16
+ ; CHECK: store <3 x double> [[VEC2]], ptr [[PTR]]
+ %a2_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 16)
+ %a2 = load <3 x double>, ptr addrspace(2) %a2_gep, align 32
+ %a2.i = getelementptr inbounds nuw i8, ptr %dst, i32 16
+ store <3 x double> %a2, ptr %a2.i, align 8
+
+ ;; a3
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { half, half, half, half, half, half, half, half } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 2)
+ ; CHECK: [[X:%.*]] = extractvalue { half, half, half, half, half, half, half, half } [[LOAD]], 4
+ ; CHECK: [[Y:%.*]] = extractvalue { half, half, half, half, half, half, half, half } [[LOAD]], 5
+ ; CHECK: [[VEC0:%.*]] = insertelement <2 x half> poison, half [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <2 x half> [[VEC0]], half [[Y]], i32 1
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 40
+ ; CHECK: store <2 x half> [[VEC1]], ptr [[PTR]]
+ %a3_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 40)
+ %a3 = load <2 x half>, ptr addrspace(2) %a3_gep, align 4
+ %a3.i = getelementptr inbounds nuw i8, ptr %dst, i32 40
+ store <2 x half> %a3, ptr %a3.i, align 2
+
+ ;; a4
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 3)
+ ; CHECK: [[X:%.*]] = extractvalue { i64, i64 } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { i64, i64 } [[LOAD]], 1
+ ; CHECK: [[LOAD:%.*]] = call { i64, i64 } @llvm.dx.resource.load.cbufferrow.2.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 4)
+ ; CHECK: [[Z:%.*]] = extractvalue { i64, i64 } [[LOAD]], 0
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x i64> poison, i64 [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x i64> [[VEC0]], i64 [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x i64> [[VEC1]], i64 [[Z]], i32 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 48
+ ; CHECK: store <3 x i64> [[VEC2]], ptr [[PTR]]
+ %a4_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 48)
+ %a4 = load <3 x i64>, ptr addrspace(2) %a4_gep, align 32
+ %a4.i = getelementptr inbounds nuw i8, ptr %dst, i32 48
+ store <3 x i64> %a4, ptr %a4.i, align 8
+
+ ;; a5
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i32, i32, i32, i32 } @llvm.dx.resource.load.cbufferrow.4.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 5)
+ ; CHECK: [[X:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 1
+ ; CHECK: [[Z:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 2
+ ; CHECK: [[A:%.*]] = extractvalue { i32, i32, i32, i32 } [[LOAD]], 3
+ ; CHECK: [[VEC0:%.*]] = insertelement <4 x i32> poison, i32 [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <4 x i32> [[VEC0]], i32 [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <4 x i32> [[VEC1]], i32 [[Z]], i32 2
+ ; CHECK: [[VEC3:%.*]] = insertelement <4 x i32> [[VEC2]], i32 [[A]], i32 3
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 72
+ ; CHECK: store <4 x i32> [[VEC3]], ptr [[PTR]]
+ %a5_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 80)
+ %a5 = load <4 x i32>, ptr addrspace(2) %a5_gep, align 16
+ %a5.i = getelementptr inbounds nuw i8, ptr %dst, i32 72
+ store <4 x i32> %a5, ptr %a5.i, align 4
+
+ ;; a6
+ ;
+ ; CHECK: [[LOAD:%.*]] = call { i16, i16, i16, i16, i16, i16, i16, i16 } @llvm.dx.resource.load.cbufferrow.8.{{.*}}(target("dx.CBuffer", %__cblayout_CB) [[CB]], i32 6)
+ ; CHECK: [[X:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 0
+ ; CHECK: [[Y:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 1
+ ; CHECK: [[Z:%.*]] = extractvalue { i16, i16, i16, i16, i16, i16, i16, i16 } [[LOAD]], 2
+ ; CHECK: [[VEC0:%.*]] = insertelement <3 x i16> poison, i16 [[X]], i32 0
+ ; CHECK: [[VEC1:%.*]] = insertelement <3 x i16> [[VEC0]], i16 [[Y]], i32 1
+ ; CHECK: [[VEC2:%.*]] = insertelement <3 x i16> [[VEC1]], i16 [[Z]], i32 2
+ ; CHECK: [[PTR:%.*]] = getelementptr inbounds nuw i8, ptr %dst, i32 88
+ ; CHECK: store <3 x i16> [[VEC2]], ptr [[PTR]]
+ %a6_gep = call ptr addrspace(2) @llvm.dx.resource.getpointer(target("dx.CBuffer", %__cblayout_CB) %CB.cb, i32 96)
+ %a6 = load <3 x i16>, ptr addrspace(2) %a6_gep, align 8
+ %a6.i = getelementptr inbounds nuw i8, ptr %dst, i32 88
+ store <3 x i16> %a6, ptr %a6.i, align 2
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/Hexagon/instrprof-custom.ll b/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
index 620b2ac..1c1965d 100644
--- a/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
+++ b/llvm/test/CodeGen/Hexagon/instrprof-custom.ll
@@ -1,5 +1,5 @@
; RUN: llc -mtriple=hexagon -relocation-model=pic < %s | FileCheck %s
-; RUN: llc -mtriple=hexagon < %s | FileCheck %s
+; RUN: llc -mtriple=hexagon --mattr=+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp < %s | FileCheck %s
; CHECK-LABEL: test1:
; CHECK: {{call my_instrprof_handler|r0 = #999}}
@@ -14,7 +14,4 @@ entry:
}
; Function Attrs: inaccessiblememonly nofree nosync nounwind willreturn
-declare void @llvm.hexagon.instrprof.custom(ptr, i32) #1
-
-attributes #0 = { "target-features"="+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp,+hmxv68" }
-attributes #1 = { inaccessiblememonly nofree nosync nounwind willreturn }
+declare void @llvm.hexagon.instrprof.custom(ptr, i32)
diff --git a/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll b/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll
deleted file mode 100644
index 9dd402d..0000000
--- a/llvm/test/CodeGen/MLRegAlloc/dev-mode-extra-features-logging.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; REQUIRES: have_tflite
-; REQUIRES: x86_64-linux
-;
-; Check that we log the currently in development features correctly with both the default
-; case and with a learned policy.
-;
-; RUN: llc -o /dev/null -mtriple=x86_64-linux-unknown -regalloc=greedy \
-; RUN: -regalloc-enable-advisor=development \
-; RUN: -regalloc-training-log=%t1 \
-; RUN: -regalloc-enable-development-features < %S/Inputs/input.ll
-; RUN: %python %S/../../../lib/Analysis/models/log_reader.py %t1 > %t1.readable
-; RUN: FileCheck --input-file %t1.readable %s
-
-; RUN: rm -rf %t && mkdir %t
-; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-eviction-test-model.py %t_savedmodel
-; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
-; RUN: llc -o /dev/null -mtriple=x86_64-linux-unknown -regalloc=greedy \
-; RUN: -regalloc-enable-advisor=development \
-; RUN: -regalloc-training-log=%t2 -regalloc-model=%t \
-; RUN: -regalloc-enable-development-features < %S/Inputs/input.ll
-; RUN: %python %S/../../../lib/Analysis/models/log_reader.py %t2 > %t2.readable
-; RUN: FileCheck --input-file %t2.readable %s
-
-; CHECK-NOT: nan
-; Check the first five opcodes in the first eviction problem
-; Also, the first eviction problem is significantly less than 300 instructions. Check
-; that there is a zero value.
-; Note: we're regex-ing some of the opcodes to avoid test flakyness.
-; CHECK: instructions: 20,{{([0-9]{4})}},{{([0-9]{4})}},{{([0-9]{4})}},{{.*}},0,
-; Only the candidate virtreg and the 10th LR are included in this problem. Make
-; sure the other LRs have values of zero. There are 2700 0s followed by some 1s.
-; There's a limit to how many repetitions can be matched.
-; CHECK: instructions_mapping: {{(((0,){27}){100})}}
-; CHECK-SAME: 1
-; Indexing 300 back from where the candidate vr actual resides due to the fact
-; that not all the values between the 10th LR and the candidate are zero.
-; CHECK-SAME-COUNT-6600: 0,
-; CHECK-SAME: 1
-; Ensure that we can still go through the mapping matrices for the rest of the
-; eviction problems to make sure we haven't hit the end of the matrix above.
-; There are a total of 23 eviction problems with this test.
-; CHECK-LABEL: observation: 16
-; Make sure that we're exporting the mbb_frequencies. Don't actually check
-; values due to all values being floating point/liable to change very easily.
-; CHECK: mbb_frequencies:
-; Make sure that we have the mbb_mapping feature, and that the first couple
-; of values are correct.
-; CHECK: mbb_mapping: 0,0,0,0,1,1,1
diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincos.ll b/llvm/test/CodeGen/PowerPC/llvm.sincos.ll
index aaf81ff..5b4e91c 100644
--- a/llvm/test/CodeGen/PowerPC/llvm.sincos.ll
+++ b/llvm/test/CodeGen/PowerPC/llvm.sincos.ll
@@ -26,30 +26,6 @@ define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128(ppc_fp128 %a) {
ret { ppc_fp128, ppc_fp128 } %result
}
-define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128(ppc_fp128 %a) {
-; CHECK-LABEL: test_sincospi_ppcf128:
-; CHECK: # %bb.0:
-; CHECK-NEXT: mflr r0
-; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: std r0, 80(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: addi r5, r1, 48
-; CHECK-NEXT: addi r6, r1, 32
-; CHECK-NEXT: bl sincospil
-; CHECK-NEXT: nop
-; CHECK-NEXT: lfd f1, 48(r1)
-; CHECK-NEXT: lfd f2, 56(r1)
-; CHECK-NEXT: lfd f3, 32(r1)
-; CHECK-NEXT: lfd f4, 40(r1)
-; CHECK-NEXT: addi r1, r1, 64
-; CHECK-NEXT: ld r0, 16(r1)
-; CHECK-NEXT: mtlr r0
-; CHECK-NEXT: blr
- %result = call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
- ret { ppc_fp128, ppc_fp128 } %result
-}
-
; FIXME: This could be made a tail call with the default expansion of llvm.sincos.
define void @test_sincos_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) {
; CHECK-LABEL: test_sincos_ppcf128_void_tail_call:
@@ -73,29 +49,6 @@ define void @test_sincos_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_s
ret void
}
-; FIXME: This could be made a tail call with the default expansion of llvm.sincospi.
-define void @test_sincospi_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) {
-; CHECK-LABEL: test_sincospi_ppcf128_void_tail_call:
-; CHECK: # %bb.0:
-; CHECK-NEXT: mflr r0
-; CHECK-NEXT: stdu r1, -32(r1)
-; CHECK-NEXT: std r0, 48(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 32
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: bl sincospil
-; CHECK-NEXT: nop
-; CHECK-NEXT: addi r1, r1, 32
-; CHECK-NEXT: ld r0, 16(r1)
-; CHECK-NEXT: mtlr r0
-; CHECK-NEXT: blr
- %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
- %result.0 = extractvalue { ppc_fp128, ppc_fp128 } %result, 0
- %result.1 = extractvalue { ppc_fp128, ppc_fp128 } %result, 1
- store ppc_fp128 %result.0, ptr %out_sin, align 16
- store ppc_fp128 %result.1, ptr %out_cos, align 16
- ret void
-}
-
; NOTE: This would need a struct-return library call for llvm.sincos to become a tail call.
define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128_tail_call(ppc_fp128 %a) {
; CHECK-LABEL: test_sincos_ppcf128_tail_call:
@@ -120,28 +73,3 @@ define { ppc_fp128, ppc_fp128 } @test_sincos_ppcf128_tail_call(ppc_fp128 %a) {
%result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincos.ppcf128(ppc_fp128 %a)
ret { ppc_fp128, ppc_fp128 } %result
}
-
-; NOTE: This would need a struct-return library call for llvm.sincospi to become a tail call.
-define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128_tail_call(ppc_fp128 %a) {
-; CHECK-LABEL: test_sincospi_ppcf128_tail_call:
-; CHECK: # %bb.0:
-; CHECK-NEXT: mflr r0
-; CHECK-NEXT: stdu r1, -64(r1)
-; CHECK-NEXT: std r0, 80(r1)
-; CHECK-NEXT: .cfi_def_cfa_offset 64
-; CHECK-NEXT: .cfi_offset lr, 16
-; CHECK-NEXT: addi r5, r1, 48
-; CHECK-NEXT: addi r6, r1, 32
-; CHECK-NEXT: bl sincospil
-; CHECK-NEXT: nop
-; CHECK-NEXT: lfd f1, 48(r1)
-; CHECK-NEXT: lfd f2, 56(r1)
-; CHECK-NEXT: lfd f3, 32(r1)
-; CHECK-NEXT: lfd f4, 40(r1)
-; CHECK-NEXT: addi r1, r1, 64
-; CHECK-NEXT: ld r0, 16(r1)
-; CHECK-NEXT: mtlr r0
-; CHECK-NEXT: blr
- %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
- ret { ppc_fp128, ppc_fp128 } %result
-}
diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll
new file mode 100644
index 0000000..75e7559
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ll
@@ -0,0 +1,21 @@
+; RUN: not llc -mtriple=powerpc64le-gnu-linux -filetype=null %s 2>&1 | FileCheck %s
+
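+; PowerPC has no sincospi libcall, so lowering llvm.sincospi fails with the
+; "no libcall available" diagnostic for each type below.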
+; CHECK: error: no libcall available for fsincospi
+define { half, half } @test_sincospi_f16(half %a) #0 {
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ ret { half, half } %result
+}
+
+; CHECK: error: no libcall available for fsincospi
+define { float, float } @test_sincospi_f32(float %a) #0 {
+ %result = call { float, float } @llvm.sincospi.f32(float %a)
+ ret { float, float } %result
+}
+
+; CHECK: error: no libcall available for fsincospi
+define { double, double } @test_sincospi_f64(double %a) #0 {
+ %result = call { double, double } @llvm.sincospi.f64(double %a)
+ ret { double, double } %result
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll
new file mode 100644
index 0000000..bc656bb
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/llvm.sincospi.ppcfp128.ll
@@ -0,0 +1,25 @@
+; XFAIL: *
+; FIXME: asserts
+; RUN: llc -mcpu=pwr9 -mtriple=powerpc64le-gnu-linux -filetype=null \
+; RUN: -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names %s
+
+define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128(ppc_fp128 %a) {
+ %result = call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
+ ret { ppc_fp128, ppc_fp128 } %result
+}
+
+; FIXME: This could be made a tail call with the default expansion of llvm.sincospi.
+define void @test_sincospi_ppcf128_void_tail_call(ppc_fp128 %a, ptr noalias %out_sin, ptr noalias %out_cos) {
+ %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
+ %result.0 = extractvalue { ppc_fp128, ppc_fp128 } %result, 0
+ %result.1 = extractvalue { ppc_fp128, ppc_fp128 } %result, 1
+ store ppc_fp128 %result.0, ptr %out_sin, align 16
+ store ppc_fp128 %result.1, ptr %out_cos, align 16
+ ret void
+}
+
+; NOTE: This would need a struct-return library call for llvm.sincospi to become a tail call.
+define { ppc_fp128, ppc_fp128 } @test_sincospi_ppcf128_tail_call(ppc_fp128 %a) {
+ %result = tail call { ppc_fp128, ppc_fp128 } @llvm.sincospi.ppcf128(ppc_fp128 %a)
+ ret { ppc_fp128, ppc_fp128 } %result
+}
diff --git a/llvm/test/CodeGen/PowerPC/milicode32.ll b/llvm/test/CodeGen/PowerPC/milicode32.ll
index 78d0362..ddadd01 100644
--- a/llvm/test/CodeGen/PowerPC/milicode32.ll
+++ b/llvm/test/CodeGen/PowerPC/milicode32.ll
@@ -69,3 +69,59 @@ entry:
}
declare i32 @strlen(ptr noundef) nounwind
+
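+; memmove is a millicode routine on AIX, so 32-bit AIX lowers the memmove
+; intrinsic to the .___memmove[PR] entry point, while 32-bit Linux emits a
+; plain call to memmove, as the CHECK lines below show.
+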
+define ptr @test_memmove(ptr noundef %destination, ptr noundef %source, i32 noundef %num) #0 {
+; CHECK-AIX-32-P9-LABEL: test_memmove:
+; CHECK-AIX-32-P9: # %bb.0: # %entry
+; CHECK-AIX-32-P9-NEXT: mflr r0
+; CHECK-AIX-32-P9-NEXT: stwu r1, -80(r1)
+; CHECK-AIX-32-P9-NEXT: stw r0, 88(r1)
+; CHECK-AIX-32-P9-NEXT: stw r31, 76(r1) # 4-byte Folded Spill
+; CHECK-AIX-32-P9-NEXT: mr r31, r3
+; CHECK-AIX-32-P9-NEXT: stw r3, 72(r1)
+; CHECK-AIX-32-P9-NEXT: stw r4, 68(r1)
+; CHECK-AIX-32-P9-NEXT: stw r5, 64(r1)
+; CHECK-AIX-32-P9-NEXT: bl .___memmove[PR]
+; CHECK-AIX-32-P9-NEXT: nop
+; CHECK-AIX-32-P9-NEXT: mr r3, r31
+; CHECK-AIX-32-P9-NEXT: lwz r31, 76(r1) # 4-byte Folded Reload
+; CHECK-AIX-32-P9-NEXT: addi r1, r1, 80
+; CHECK-AIX-32-P9-NEXT: lwz r0, 8(r1)
+; CHECK-AIX-32-P9-NEXT: mtlr r0
+; CHECK-AIX-32-P9-NEXT: blr
+;
+; CHECK-LINUX32-P9-LABEL: test_memmove:
+; CHECK-LINUX32-P9: # %bb.0: # %entry
+; CHECK-LINUX32-P9-NEXT: mflr r0
+; CHECK-LINUX32-P9-NEXT: stwu r1, -32(r1)
+; CHECK-LINUX32-P9-NEXT: stw r0, 36(r1)
+; CHECK-LINUX32-P9-NEXT: .cfi_def_cfa_offset 32
+; CHECK-LINUX32-P9-NEXT: .cfi_offset lr, 4
+; CHECK-LINUX32-P9-NEXT: .cfi_offset r30, -8
+; CHECK-LINUX32-P9-NEXT: stw r30, 24(r1) # 4-byte Folded Spill
+; CHECK-LINUX32-P9-NEXT: mr r30, r3
+; CHECK-LINUX32-P9-NEXT: stw r3, 20(r1)
+; CHECK-LINUX32-P9-NEXT: stw r4, 16(r1)
+; CHECK-LINUX32-P9-NEXT: stw r5, 12(r1)
+; CHECK-LINUX32-P9-NEXT: bl memmove
+; CHECK-LINUX32-P9-NEXT: mr r3, r30
+; CHECK-LINUX32-P9-NEXT: lwz r30, 24(r1) # 4-byte Folded Reload
+; CHECK-LINUX32-P9-NEXT: lwz r0, 36(r1)
+; CHECK-LINUX32-P9-NEXT: addi r1, r1, 32
+; CHECK-LINUX32-P9-NEXT: mtlr r0
+; CHECK-LINUX32-P9-NEXT: blr
+entry:
+ %destination.addr = alloca ptr, align 4
+ %source.addr = alloca ptr, align 4
+ %num.addr = alloca i32, align 4
+ store ptr %destination, ptr %destination.addr, align 4
+ store ptr %source, ptr %source.addr, align 4
+ store i32 %num, ptr %num.addr, align 4
+ %0 = load ptr, ptr %destination.addr, align 4
+ %1 = load ptr, ptr %source.addr, align 4
+ %2 = load i32, ptr %num.addr, align 4
+ call void @llvm.memmove.p0.p0.i32(ptr align 1 %0, ptr align 1 %1, i32 %2, i1 false)
+ ret ptr %0
+}
+
+declare void @llvm.memmove.p0.p0.i32(ptr writeonly captures(none), ptr readonly captures(none), i32, i1 immarg)
diff --git a/llvm/test/CodeGen/PowerPC/milicode64.ll b/llvm/test/CodeGen/PowerPC/milicode64.ll
index 8b87529..2dbf414 100644
--- a/llvm/test/CodeGen/PowerPC/milicode64.ll
+++ b/llvm/test/CodeGen/PowerPC/milicode64.ll
@@ -100,3 +100,82 @@ entry:
}
declare i64 @strlen(ptr noundef) nounwind
+
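+; On 64-bit AIX the memmove intrinsic lowers to the .___memmove64[PR]
+; millicode entry point; the little- and big-endian Linux targets call
+; memmove directly.
+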
+define ptr @test_memmove(ptr noundef %destination, ptr noundef %source, i64 noundef %num) #0 {
+; CHECK-LE-P9-LABEL: test_memmove:
+; CHECK-LE-P9: # %bb.0: # %entry
+; CHECK-LE-P9-NEXT: mflr r0
+; CHECK-LE-P9-NEXT: .cfi_def_cfa_offset 80
+; CHECK-LE-P9-NEXT: .cfi_offset lr, 16
+; CHECK-LE-P9-NEXT: .cfi_offset r30, -16
+; CHECK-LE-P9-NEXT: std r30, -16(r1) # 8-byte Folded Spill
+; CHECK-LE-P9-NEXT: stdu r1, -80(r1)
+; CHECK-LE-P9-NEXT: std r0, 96(r1)
+; CHECK-LE-P9-NEXT: mr r30, r3
+; CHECK-LE-P9-NEXT: std r3, 56(r1)
+; CHECK-LE-P9-NEXT: std r4, 48(r1)
+; CHECK-LE-P9-NEXT: std r5, 40(r1)
+; CHECK-LE-P9-NEXT: bl memmove
+; CHECK-LE-P9-NEXT: nop
+; CHECK-LE-P9-NEXT: mr r3, r30
+; CHECK-LE-P9-NEXT: addi r1, r1, 80
+; CHECK-LE-P9-NEXT: ld r0, 16(r1)
+; CHECK-LE-P9-NEXT: ld r30, -16(r1) # 8-byte Folded Reload
+; CHECK-LE-P9-NEXT: mtlr r0
+; CHECK-LE-P9-NEXT: blr
+;
+; CHECK-BE-P9-LABEL: test_memmove:
+; CHECK-BE-P9: # %bb.0: # %entry
+; CHECK-BE-P9-NEXT: mflr r0
+; CHECK-BE-P9-NEXT: stdu r1, -160(r1)
+; CHECK-BE-P9-NEXT: std r0, 176(r1)
+; CHECK-BE-P9-NEXT: .cfi_def_cfa_offset 160
+; CHECK-BE-P9-NEXT: .cfi_offset lr, 16
+; CHECK-BE-P9-NEXT: .cfi_offset r30, -16
+; CHECK-BE-P9-NEXT: std r30, 144(r1) # 8-byte Folded Spill
+; CHECK-BE-P9-NEXT: mr r30, r3
+; CHECK-BE-P9-NEXT: std r3, 136(r1)
+; CHECK-BE-P9-NEXT: std r4, 128(r1)
+; CHECK-BE-P9-NEXT: std r5, 120(r1)
+; CHECK-BE-P9-NEXT: bl memmove
+; CHECK-BE-P9-NEXT: nop
+; CHECK-BE-P9-NEXT: mr r3, r30
+; CHECK-BE-P9-NEXT: ld r30, 144(r1) # 8-byte Folded Reload
+; CHECK-BE-P9-NEXT: addi r1, r1, 160
+; CHECK-BE-P9-NEXT: ld r0, 16(r1)
+; CHECK-BE-P9-NEXT: mtlr r0
+; CHECK-BE-P9-NEXT: blr
+;
+; CHECK-AIX-64-P9-LABEL: test_memmove:
+; CHECK-AIX-64-P9: # %bb.0: # %entry
+; CHECK-AIX-64-P9-NEXT: mflr r0
+; CHECK-AIX-64-P9-NEXT: stdu r1, -144(r1)
+; CHECK-AIX-64-P9-NEXT: std r0, 160(r1)
+; CHECK-AIX-64-P9-NEXT: std r31, 136(r1) # 8-byte Folded Spill
+; CHECK-AIX-64-P9-NEXT: mr r31, r3
+; CHECK-AIX-64-P9-NEXT: std r3, 128(r1)
+; CHECK-AIX-64-P9-NEXT: std r4, 120(r1)
+; CHECK-AIX-64-P9-NEXT: std r5, 112(r1)
+; CHECK-AIX-64-P9-NEXT: bl .___memmove64[PR]
+; CHECK-AIX-64-P9-NEXT: nop
+; CHECK-AIX-64-P9-NEXT: mr r3, r31
+; CHECK-AIX-64-P9-NEXT: ld r31, 136(r1) # 8-byte Folded Reload
+; CHECK-AIX-64-P9-NEXT: addi r1, r1, 144
+; CHECK-AIX-64-P9-NEXT: ld r0, 16(r1)
+; CHECK-AIX-64-P9-NEXT: mtlr r0
+; CHECK-AIX-64-P9-NEXT: blr
+entry:
+ %destination.addr = alloca ptr, align 8
+ %source.addr = alloca ptr, align 8
+ %num.addr = alloca i64, align 8
+ store ptr %destination, ptr %destination.addr, align 8
+ store ptr %source, ptr %source.addr, align 8
+ store i64 %num, ptr %num.addr, align 8
+ %0 = load ptr, ptr %destination.addr, align 8
+ %1 = load ptr, ptr %source.addr, align 8
+ %2 = load i64, ptr %num.addr, align 8
+ call void @llvm.memmove.p0.p0.i64(ptr align 1 %0, ptr align 1 %1, i64 %2, i1 false)
+ ret ptr %0
+}
+
+declare void @llvm.memmove.p0.p0.i64(ptr writeonly captures(none), ptr readonly captures(none), i64, i1 immarg)
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 22c2d81..f26d4f0 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -125,6 +125,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+smcdeleg %s -o - | FileCheck --check-prefixes=CHECK,RV32SMCDELEG %s
; RUN: llc -mtriple=riscv32 -mattr=+smcntrpmf %s -o - | FileCheck --check-prefixes=CHECK,RV32SMCNTRPMF %s
; RUN: llc -mtriple=riscv32 -mattr=+smepmp %s -o - | FileCheck --check-prefixes=CHECK,RV32SMEPMP %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-smpmpmt %s -o - | FileCheck --check-prefixes=CHECK,RV32SMPMPMT %s
; RUN: llc -mtriple=riscv32 -mattr=+smrnmi %s -o - | FileCheck --check-prefixes=CHECK,RV32SMRNMI %s
; RUN: llc -mtriple=riscv32 -mattr=+zfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV32ZFBFMIN %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfa %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFA %s
@@ -275,6 +276,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+smcdeleg %s -o - | FileCheck --check-prefixes=CHECK,RV64SMCDELEG %s
; RUN: llc -mtriple=riscv64 -mattr=+smcntrpmf %s -o - | FileCheck --check-prefixes=CHECK,RV64SMCNTRPMF %s
; RUN: llc -mtriple=riscv64 -mattr=+smepmp %s -o - | FileCheck --check-prefixes=CHECK,RV64SMEPMP %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-smpmpmt %s -o - | FileCheck --check-prefixes=CHECK,RV64SMPMPMT %s
; RUN: llc -mtriple=riscv64 -mattr=+smrnmi %s -o - | FileCheck --check-prefixes=CHECK,RV64SMRNMI %s
; RUN: llc -mtriple=riscv64 -mattr=+zfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV64ZFBFMIN %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfa %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFA %s
@@ -439,6 +441,7 @@
; RV32SMCDELEG: .attribute 5, "rv32i2p1_smcdeleg1p0"
; RV32SMCNTRPMF: .attribute 5, "rv32i2p1_smcntrpmf1p0"
; RV32SMEPMP: .attribute 5, "rv32i2p1_smepmp1p0"
+; RV32SMPMPMT: .attribute 5, "rv32i2p1_smpmpmt0p6"
; RV32SMRNMI: .attribute 5, "rv32i2p1_smrnmi1p0"
; RV32ZFBFMIN: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfbfmin1p0"
; RV32ZVFBFA: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfbfmin1p0_zve32f1p0_zve32x1p0_zvfbfa0p1_zvl32b1p0"
@@ -587,6 +590,7 @@
; RV64SMCDELEG: .attribute 5, "rv64i2p1_smcdeleg1p0"
; RV64SMCNTRPMF: .attribute 5, "rv64i2p1_smcntrpmf1p0"
; RV64SMEPMP: .attribute 5, "rv64i2p1_smepmp1p0"
+; RV64SMPMPMT: .attribute 5, "rv64i2p1_smpmpmt0p6"
; RV64SMRNMI: .attribute 5, "rv64i2p1_smrnmi1p0"
; RV64ZFBFMIN: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfbfmin1p0"
; RV64ZVFBFA: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfbfmin1p0_zve32f1p0_zve32x1p0_zvfbfa0p1_zvl32b1p0"
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index cf44af6..3d9906f 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -27,6 +27,7 @@
; CHECK-NEXT: experimental - Experimental intrinsics.
; CHECK-NEXT: experimental-p - 'P' ('Base P' (Packed SIMD)).
; CHECK-NEXT: experimental-rvm23u32 - RISC-V experimental-rvm23u32 profile.
+; CHECK-NEXT: experimental-smpmpmt - 'Smpmpmt' (PMP-based Memory Types Extension).
; CHECK-NEXT: experimental-svukte - 'Svukte' (Address-Independent Latency of User-Mode Faults to Supervisor Addresses).
; CHECK-NEXT: experimental-xqccmp - 'Xqccmp' (Qualcomm 16-bit Push/Pop and Double Moves).
; CHECK-NEXT: experimental-xqcia - 'Xqcia' (Qualcomm uC Arithmetic Extension).
diff --git a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
index ba6769b..0306bb1 100644
--- a/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/overflow-intrinsics.ll
@@ -232,7 +232,7 @@ define i64 @uaddo3_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
ret i64 %Q
}
-; TODO? CGP sinks the compare before we have a chance to form the overflow intrinsic.
+; Ensure CGP doesn't sink the compare before we have a chance to form the overflow intrinsic.
define i64 @uaddo4(i64 %a, i64 %b, i1 %c) nounwind ssp {
; RV32-LABEL: uaddo4:
@@ -1076,41 +1076,37 @@ define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, ptr %p, i1 %cond) {
; RV32-NEXT: .cfi_offset s4, -24
; RV32-NEXT: .cfi_offset s5, -28
; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: mv s5, a5
-; RV32-NEXT: mv s3, a1
+; RV32-NEXT: mv s1, a5
+; RV32-NEXT: mv s4, a1
; RV32-NEXT: andi a1, a5, 1
-; RV32-NEXT: beqz a1, .LBB32_8
+; RV32-NEXT: beqz a1, .LBB32_6
; RV32-NEXT: # %bb.1: # %t
; RV32-NEXT: mv s0, a4
-; RV32-NEXT: mv s2, a3
-; RV32-NEXT: mv s1, a2
-; RV32-NEXT: mv s4, a0
-; RV32-NEXT: beq s3, a3, .LBB32_3
+; RV32-NEXT: mv s3, a3
+; RV32-NEXT: mv s2, a2
+; RV32-NEXT: mv s5, a0
+; RV32-NEXT: beq s4, a3, .LBB32_3
; RV32-NEXT: # %bb.2: # %t
-; RV32-NEXT: sltu s6, s3, s2
+; RV32-NEXT: sltu s6, s4, s3
; RV32-NEXT: j .LBB32_4
; RV32-NEXT: .LBB32_3:
-; RV32-NEXT: sltu s6, s4, s1
+; RV32-NEXT: sltu s6, s5, s2
; RV32-NEXT: .LBB32_4: # %t
; RV32-NEXT: mv a0, s6
; RV32-NEXT: call call
-; RV32-NEXT: beqz s6, .LBB32_8
+; RV32-NEXT: beqz s6, .LBB32_6
; RV32-NEXT: # %bb.5: # %end
-; RV32-NEXT: sltu a1, s4, s1
-; RV32-NEXT: mv a0, a1
-; RV32-NEXT: beq s3, s2, .LBB32_7
-; RV32-NEXT: # %bb.6: # %end
-; RV32-NEXT: sltu a0, s3, s2
-; RV32-NEXT: .LBB32_7: # %end
-; RV32-NEXT: sub a2, s3, s2
-; RV32-NEXT: sub a3, s4, s1
-; RV32-NEXT: sub a2, a2, a1
-; RV32-NEXT: sw a3, 0(s0)
-; RV32-NEXT: sw a2, 4(s0)
-; RV32-NEXT: j .LBB32_9
-; RV32-NEXT: .LBB32_8: # %f
-; RV32-NEXT: mv a0, s5
-; RV32-NEXT: .LBB32_9: # %f
+; RV32-NEXT: sltu a0, s5, s2
+; RV32-NEXT: sub a1, s4, s3
+; RV32-NEXT: sub a2, s5, s2
+; RV32-NEXT: sub a1, a1, a0
+; RV32-NEXT: sw a2, 0(s0)
+; RV32-NEXT: sw a1, 4(s0)
+; RV32-NEXT: mv a0, s6
+; RV32-NEXT: j .LBB32_7
+; RV32-NEXT: .LBB32_6: # %f
+; RV32-NEXT: mv a0, s1
+; RV32-NEXT: .LBB32_7: # %f
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/SPIRV/non_int_constant_null.ll b/llvm/test/CodeGen/SPIRV/non_int_constant_null.ll
new file mode 100644
index 0000000..0ba016a
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/non_int_constant_null.ll
@@ -0,0 +1,25 @@
+; RUN: llc -mtriple spirv64-unknown-unknown %s --spirv-ext=+SPV_KHR_float_controls2 -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -mtriple spirv64-unknown-unknown %s --spirv-ext=+SPV_KHR_float_controls2 -o - -filetype=obj | spirv-val %}
+
+@A = addrspace(1) constant [1 x i8] zeroinitializer
+
+; CHECK: OpName %[[#FOO:]] "foo"
+; CHECK: OpName %[[#A:]] "A"
+; CHECK: OpDecorate %[[#A]] Constant
+; CHECK: OpDecorate %[[#A]] LinkageAttributes "A" Export
+; CHECK: %[[#INT8:]] = OpTypeInt 8 0
+; CHECK: %[[#INT32:]] = OpTypeInt 32 0
+; CHECK: %[[#ONE:]] = OpConstant %[[#INT32]] 1
+; CHECK: %[[#ARR_INT8:]] = OpTypeArray %[[#INT8]] %[[#ONE]]
+; CHECK: %[[#ARR_INT8_PTR:]] = OpTypePointer CrossWorkgroup %[[#ARR_INT8]]
+; CHECK: %[[#ARR_INT8_ZERO:]] = OpConstantNull %[[#ARR_INT8]]
+; CHECK: %[[#A]] = OpVariable %[[#ARR_INT8_PTR]] CrossWorkgroup %[[#ARR_INT8_ZERO]]
+; CHECK: %[[#FOO]] = OpFunction
+; CHECK: = OpLabel
+; CHECK: OpReturn
+; CHECK: OpFunctionEnd
+
+define spir_kernel void @foo() {
+entry:
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/opencl/unpackhalf2x16-error.ll b/llvm/test/CodeGen/SPIRV/opencl/unpackhalf2x16-error.ll
new file mode 100644
index 0000000..1d3ba2a
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/opencl/unpackhalf2x16-error.ll
@@ -0,0 +1,10 @@
+; RUN: not llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o /dev/null 2>&1 | FileCheck %s
+; RUN: not llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o /dev/null 2>&1 | FileCheck %s
+
+; CHECK: LLVM ERROR: %5:vfid(<2 x s64>) = nnan ninf nsz arcp afn reassoc G_INTRINSIC intrinsic(@llvm.spv.unpackhalf2x16), %0:iid(s64) is only supported with the GLSL extended instruction set.
+
+define hidden spir_func noundef nofpclass(nan inf) float @_Z9test_funcj(i32 noundef %0) local_unnamed_addr #0 {
+ %2 = tail call reassoc nnan ninf nsz arcp afn <2 x float> @llvm.spv.unpackhalf2x16.v2f32(i32 %0)
+ %3 = extractelement <2 x float> %2, i64 0
+ ret float %3
+}
diff --git a/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll b/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
index 260394b..fb550bb 100644
--- a/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
+++ b/llvm/test/CodeGen/SPIRV/spirv_param_decorations_quals.ll
@@ -7,9 +7,11 @@ entry:
; CHECK-SPIRV: OpDecorate %[[#PId:]] Volatile
; CHECK-SPIRV: OpDecorate %[[#PId]] FuncParamAttr NoAlias
+; CHECK-SPIRV: OpDecorate %[[#PId]] FuncParamAttr NoWrite
; CHECK-SPIRV: %[[#PId]] = OpFunctionParameter %[[#]]
!7 = !{!"volatile"}
!8 = !{i32 38, i32 4} ; FuncParamAttr NoAlias
-!9 = !{!8}
+!11 = !{i32 38, i32 6} ; FuncParamAttr NoWrite
+!9 = !{!8, !11}
!10 = !{!9}
diff --git a/llvm/test/CodeGen/SPIRV/zero-length-array.ll b/llvm/test/CodeGen/SPIRV/zero-length-array.ll
index 666176c..5fd94d2 100644
--- a/llvm/test/CodeGen/SPIRV/zero-length-array.ll
+++ b/llvm/test/CodeGen/SPIRV/zero-length-array.ll
@@ -1,10 +1,17 @@
; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-vulkan-compute %s -o - -filetype=obj | spirv-val %}
-; CHECK: %[[#type:]] = OpTypeInt 32 0
-; CHECK: %[[#ext:]] = OpConstant %[[#type]] 0
+; No code is generated for the zero-length array, but compilation doesn't crash.
+; CHECK: OpName %[[#FOO:]] "foo"
+; CHECK: OpName %[[#RTM:]] "reg2mem alloca point"
+; CHECK: %[[#INT:]] = OpTypeInt 32 0
+; CHECK: %[[#RTM]] = OpConstant %[[#INT]] 0
+; CHECK: %[[#FOO]] = OpFunction
+; CHECK-NEXT: = OpLabel
+; CHECK-NEXT: OpReturn
+; CHECK-NEXT: OpFunctionEnd
-define spir_func void @_Z3foov() {
+define spir_func void @foo() {
entry:
%i = alloca [0 x i32], align 4
ret void
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll
index 79665af..9632469 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/constbound.ll
@@ -7,22 +7,22 @@ define dso_local i32 @test_500_504(ptr nocapture readonly %x) {
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
; CHECK-NEXT: mov.w lr, #126
-; CHECK-NEXT: adr r2, .LCPI0_0
-; CHECK-NEXT: vldrw.u32 q0, [r2]
-; CHECK-NEXT: mov.w r2, #500
-; CHECK-NEXT: vdup.32 q1, r2
-; CHECK-NEXT: movs r1, #0
+; CHECK-NEXT: adr r1, .LCPI0_0
+; CHECK-NEXT: vldrw.u32 q0, [r1]
+; CHECK-NEXT: mov.w r1, #500
+; CHECK-NEXT: mov.w r12, #0
+; CHECK-NEXT: vdup.32 q1, r1
; CHECK-NEXT: movs r2, #0
; CHECK-NEXT: .LBB0_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vqadd.u32 q2, q0, r1
-; CHECK-NEXT: adds r1, #4
+; CHECK-NEXT: vqadd.u32 q2, q0, r2
+; CHECK-NEXT: adds r2, #4
; CHECK-NEXT: vptt.u32 hi, q1, q2
; CHECK-NEXT: vldrwt.u32 q2, [r0], #16
-; CHECK-NEXT: vaddvat.u32 r2, q2
+; CHECK-NEXT: vaddvat.u32 r12, q2
; CHECK-NEXT: le lr, .LBB0_1
; CHECK-NEXT: @ %bb.2: @ %for.cond.cleanup
-; CHECK-NEXT: mov r0, r2
+; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: pop {r7, pc}
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: @ %bb.3:
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
index ec257bc..bcedcd4 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/minloop.ll
@@ -28,29 +28,29 @@ define void @arm_min_q31(ptr nocapture readonly %pSrc, i32 %blockSize, ptr nocap
; CHECK-NEXT: str r6, [sp] @ 4-byte Spill
; CHECK-NEXT: subs r7, #4
; CHECK-NEXT: movs r6, #1
-; CHECK-NEXT: mov.w r8, #0
; CHECK-NEXT: mov.w r10, #0
+; CHECK-NEXT: mov.w r8, #0
; CHECK-NEXT: add.w lr, r6, r7, lsr #2
; CHECK-NEXT: .LBB0_5: @ %while.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr r11, [r0, #16]!
-; CHECK-NEXT: ldrd r5, r7, [r0, #-12]
+; CHECK-NEXT: ldrd r5, r6, [r0, #-12]
; CHECK-NEXT: ldr r4, [r0, #-4]
; CHECK-NEXT: cmp r12, r5
; CHECK-NEXT: csel r5, r5, r12, gt
-; CHECK-NEXT: csinc r6, r10, r8, le
-; CHECK-NEXT: cmp r5, r7
+; CHECK-NEXT: csinc r7, r10, r8, le
+; CHECK-NEXT: cmp r5, r6
; CHECK-NEXT: it gt
-; CHECK-NEXT: addgt.w r6, r8, #2
-; CHECK-NEXT: csel r7, r7, r5, gt
-; CHECK-NEXT: cmp r7, r4
+; CHECK-NEXT: addgt.w r7, r8, #2
+; CHECK-NEXT: csel r6, r6, r5, gt
+; CHECK-NEXT: cmp r6, r4
; CHECK-NEXT: it gt
-; CHECK-NEXT: addgt.w r6, r8, #3
-; CHECK-NEXT: csel r7, r4, r7, gt
+; CHECK-NEXT: addgt.w r7, r8, #3
+; CHECK-NEXT: csel r6, r4, r6, gt
; CHECK-NEXT: add.w r8, r8, #4
-; CHECK-NEXT: cmp r7, r11
-; CHECK-NEXT: csel r10, r8, r6, gt
-; CHECK-NEXT: csel r12, r11, r7, gt
+; CHECK-NEXT: cmp r6, r11
+; CHECK-NEXT: csel r10, r8, r7, gt
+; CHECK-NEXT: csel r12, r11, r6, gt
; CHECK-NEXT: le lr, .LBB0_5
; CHECK-NEXT: @ %bb.6: @ %while.end.loopexit.unr-lcssa.loopexit
; CHECK-NEXT: ldr r6, [sp] @ 4-byte Reload
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
index 1769c5d..98e082b 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/varying-outer-2d-reduction.ll
@@ -21,11 +21,12 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; ENABLED-NEXT: it lt
; ENABLED-NEXT: bxlt lr
; ENABLED-NEXT: .LBB0_1: @ %for.body.lr.ph
-; ENABLED-NEXT: push.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; ENABLED-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; ENABLED-NEXT: mov r11, r0
-; ENABLED-NEXT: ldr r0, [sp, #32]
+; ENABLED-NEXT: ldr r0, [sp, #36]
; ENABLED-NEXT: add.w r9, r2, #3
; ENABLED-NEXT: mov.w r12, #0
+; ENABLED-NEXT: mov.w r8, #1
; ENABLED-NEXT: mov r10, r11
; ENABLED-NEXT: uxth r0, r0
; ENABLED-NEXT: rsbs r5, r0, #0
@@ -49,18 +50,16 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; ENABLED-NEXT: @ %bb.5: @ %vector.ph
; ENABLED-NEXT: @ in Loop: Header=BB0_4 Depth=1
; ENABLED-NEXT: bic r0, r9, #3
-; ENABLED-NEXT: movs r7, #1
-; ENABLED-NEXT: subs r0, #4
; ENABLED-NEXT: sub.w r4, r2, r12
+; ENABLED-NEXT: subs r0, #4
; ENABLED-NEXT: vmov.i32 q1, #0x0
-; ENABLED-NEXT: add.w r6, r7, r0, lsr #2
+; ENABLED-NEXT: mov r7, r10
+; ENABLED-NEXT: add.w r6, r8, r0, lsr #2
; ENABLED-NEXT: adds r0, r2, #3
; ENABLED-NEXT: sub.w r0, r0, r12
; ENABLED-NEXT: bic r0, r0, #3
; ENABLED-NEXT: subs r0, #4
-; ENABLED-NEXT: add.w r0, r7, r0, lsr #2
-; ENABLED-NEXT: mov r7, r10
-; ENABLED-NEXT: dls lr, r0
+; ENABLED-NEXT: add.w lr, r8, r0, lsr #2
; ENABLED-NEXT: mov r0, r11
; ENABLED-NEXT: .LBB0_6: @ %vector.body
; ENABLED-NEXT: @ Parent Loop BB0_4 Depth=1
@@ -83,7 +82,7 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; ENABLED-NEXT: vaddv.u32 r0, q0
; ENABLED-NEXT: b .LBB0_3
; ENABLED-NEXT: .LBB0_8:
-; ENABLED-NEXT: pop.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; ENABLED-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; ENABLED-NEXT: bx lr
;
; NOREDUCTIONS-LABEL: varying_outer_2d_reduction:
@@ -92,11 +91,12 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; NOREDUCTIONS-NEXT: it lt
; NOREDUCTIONS-NEXT: bxlt lr
; NOREDUCTIONS-NEXT: .LBB0_1: @ %for.body.lr.ph
-; NOREDUCTIONS-NEXT: push.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; NOREDUCTIONS-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; NOREDUCTIONS-NEXT: mov r11, r0
-; NOREDUCTIONS-NEXT: ldr r0, [sp, #32]
+; NOREDUCTIONS-NEXT: ldr r0, [sp, #36]
; NOREDUCTIONS-NEXT: add.w r9, r2, #3
; NOREDUCTIONS-NEXT: mov.w r12, #0
+; NOREDUCTIONS-NEXT: mov.w r8, #1
; NOREDUCTIONS-NEXT: mov r10, r11
; NOREDUCTIONS-NEXT: uxth r0, r0
; NOREDUCTIONS-NEXT: rsbs r5, r0, #0
@@ -120,18 +120,16 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; NOREDUCTIONS-NEXT: @ %bb.5: @ %vector.ph
; NOREDUCTIONS-NEXT: @ in Loop: Header=BB0_4 Depth=1
; NOREDUCTIONS-NEXT: bic r0, r9, #3
-; NOREDUCTIONS-NEXT: movs r7, #1
-; NOREDUCTIONS-NEXT: subs r0, #4
; NOREDUCTIONS-NEXT: sub.w r4, r2, r12
+; NOREDUCTIONS-NEXT: subs r0, #4
; NOREDUCTIONS-NEXT: vmov.i32 q1, #0x0
-; NOREDUCTIONS-NEXT: add.w r6, r7, r0, lsr #2
+; NOREDUCTIONS-NEXT: mov r7, r10
+; NOREDUCTIONS-NEXT: add.w r6, r8, r0, lsr #2
; NOREDUCTIONS-NEXT: adds r0, r2, #3
; NOREDUCTIONS-NEXT: sub.w r0, r0, r12
; NOREDUCTIONS-NEXT: bic r0, r0, #3
; NOREDUCTIONS-NEXT: subs r0, #4
-; NOREDUCTIONS-NEXT: add.w r0, r7, r0, lsr #2
-; NOREDUCTIONS-NEXT: mov r7, r10
-; NOREDUCTIONS-NEXT: dls lr, r0
+; NOREDUCTIONS-NEXT: add.w lr, r8, r0, lsr #2
; NOREDUCTIONS-NEXT: mov r0, r11
; NOREDUCTIONS-NEXT: .LBB0_6: @ %vector.body
; NOREDUCTIONS-NEXT: @ Parent Loop BB0_4 Depth=1
@@ -154,7 +152,7 @@ define dso_local void @varying_outer_2d_reduction(ptr nocapture readonly %Input,
; NOREDUCTIONS-NEXT: vaddv.u32 r0, q0
; NOREDUCTIONS-NEXT: b .LBB0_3
; NOREDUCTIONS-NEXT: .LBB0_8:
-; NOREDUCTIONS-NEXT: pop.w {r4, r5, r6, r7, r9, r10, r11, lr}
+; NOREDUCTIONS-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
; NOREDUCTIONS-NEXT: bx lr
entry:
%conv = sext i16 %N to i32
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
index cbcbf1f..435acc2 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/while-loops.ll
@@ -165,74 +165,73 @@ define dso_local i32 @b(ptr %c, i32 %d, i32 %e, ptr %n) "frame-pointer"="all" {
; CHECK-NEXT: sub sp, #16
; CHECK-NEXT: wls lr, r1, .LBB2_3
; CHECK-NEXT: @ %bb.1: @ %while.body.preheader
-; CHECK-NEXT: adds r6, r3, #4
-; CHECK-NEXT: adds r1, r0, #4
+; CHECK-NEXT: add.w r9, r3, #4
+; CHECK-NEXT: add.w r10, r0, #4
; CHECK-NEXT: mvn r8, #1
-; CHECK-NEXT: @ implicit-def: $r9
+; CHECK-NEXT: @ implicit-def: $r6
; CHECK-NEXT: @ implicit-def: $r4
; CHECK-NEXT: str r2, [sp] @ 4-byte Spill
; CHECK-NEXT: .LBB2_2: @ %while.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: str r1, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: ldr.w r1, [r10]
; CHECK-NEXT: asrs r2, r4, #31
-; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: ldr r1, [r1]
+; CHECK-NEXT: str r6, [sp, #4] @ 4-byte Spill
; CHECK-NEXT: muls r1, r3, r1
; CHECK-NEXT: adds r4, r4, r1
; CHECK-NEXT: adc.w r1, r2, r1, asr #31
; CHECK-NEXT: adds.w r2, r4, #-2147483648
-; CHECK-NEXT: ldrd r2, r4, [r8]
-; CHECK-NEXT: adc r5, r1, #0
-; CHECK-NEXT: str r2, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: smull r4, r2, r4, r9
-; CHECK-NEXT: asrs r1, r5, #31
+; CHECK-NEXT: ldrd r5, r4, [r8]
+; CHECK-NEXT: adc r2, r1, #0
; CHECK-NEXT: str r5, [sp, #8] @ 4-byte Spill
-; CHECK-NEXT: subs r4, r5, r4
-; CHECK-NEXT: sbcs r1, r2
-; CHECK-NEXT: ldr r2, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: adds.w r10, r4, #-2147483648
-; CHECK-NEXT: adc r1, r1, #0
-; CHECK-NEXT: ldr r4, [r2, #-4]
+; CHECK-NEXT: smull r4, r5, r4, r6
+; CHECK-NEXT: asrs r1, r2, #31
+; CHECK-NEXT: str r2, [sp, #12] @ 4-byte Spill
+; CHECK-NEXT: subs r4, r2, r4
+; CHECK-NEXT: sbcs r1, r5
+; CHECK-NEXT: adds.w r6, r4, #-2147483648
+; CHECK-NEXT: ldr r4, [r10, #-4]
+; CHECK-NEXT: adc r11, r1, #0
+; CHECK-NEXT: mov r1, r9
+; CHECK-NEXT: add.w r10, r10, #4
; CHECK-NEXT: muls r4, r3, r4
; CHECK-NEXT: adds r3, #4
; CHECK-NEXT: adds.w r12, r4, #-2147483648
; CHECK-NEXT: asr.w r5, r4, #31
-; CHECK-NEXT: ldr r4, [r6]
+; CHECK-NEXT: ldr.w r4, [r9]
; CHECK-NEXT: adc r5, r5, #0
; CHECK-NEXT: mul r2, r4, r0
-; CHECK-NEXT: adds r0, #4
; CHECK-NEXT: add.w r2, r2, #-2147483648
; CHECK-NEXT: asrl r12, r5, r2
-; CHECK-NEXT: smull r2, r5, r4, r12
-; CHECK-NEXT: lsll r2, r5, #30
-; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload
-; CHECK-NEXT: asr.w r11, r5, #31
-; CHECK-NEXT: mov r12, r5
-; CHECK-NEXT: lsll r12, r11, r4
-; CHECK-NEXT: mul r2, r2, r9
-; CHECK-NEXT: lsrl r12, r11, #2
-; CHECK-NEXT: adds r2, #2
-; CHECK-NEXT: lsll r12, r11, r2
+; CHECK-NEXT: smull r2, r9, r4, r12
+; CHECK-NEXT: mov r12, r0
+; CHECK-NEXT: lsll r2, r9, #30
+; CHECK-NEXT: asr.w r5, r9, #31
+; CHECK-NEXT: mov r2, r9
+; CHECK-NEXT: mov r9, r1
+; CHECK-NEXT: ldrd r1, r0, [sp, #4] @ 8-byte Folded Reload
+; CHECK-NEXT: lsll r2, r5, r4
+; CHECK-NEXT: lsrl r2, r5, #2
+; CHECK-NEXT: muls r0, r1, r0
+; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
+; CHECK-NEXT: adds r0, #2
+; CHECK-NEXT: lsll r2, r5, r0
+; CHECK-NEXT: add.w r0, r2, #-2147483648
; CHECK-NEXT: ldr r2, [sp] @ 4-byte Reload
-; CHECK-NEXT: add.w r5, r12, #-2147483648
-; CHECK-NEXT: asrl r10, r1, r5
-; CHECK-NEXT: ldr r5, [sp, #8] @ 4-byte Reload
-; CHECK-NEXT: lsrl r10, r1, #2
-; CHECK-NEXT: movs r1, #2
-; CHECK-NEXT: mov r9, r10
-; CHECK-NEXT: str.w r10, [r1]
-; CHECK-NEXT: ldr r1, [r8], #-4
-; CHECK-NEXT: mls r5, r1, r4, r5
-; CHECK-NEXT: adds.w r4, r5, #-2147483648
-; CHECK-NEXT: asr.w r1, r5, #31
+; CHECK-NEXT: asrl r6, r11, r0
+; CHECK-NEXT: movs r0, #2
+; CHECK-NEXT: lsrl r6, r11, #2
+; CHECK-NEXT: str r6, [r0]
+; CHECK-NEXT: ldr r0, [r8], #-4
+; CHECK-NEXT: mls r0, r0, r4, r1
+; CHECK-NEXT: adds.w r4, r0, #-2147483648
+; CHECK-NEXT: asr.w r1, r0, #31
; CHECK-NEXT: adc r1, r1, #0
; CHECK-NEXT: lsrl r4, r1, #2
-; CHECK-NEXT: rsbs r1, r4, #0
-; CHECK-NEXT: str r1, [r2]
-; CHECK-NEXT: str r1, [r6, #-4]
-; CHECK-NEXT: adds r6, #4
-; CHECK-NEXT: ldr r1, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: adds r1, #4
+; CHECK-NEXT: rsbs r0, r4, #0
+; CHECK-NEXT: str r0, [r2]
+; CHECK-NEXT: str r0, [r9, #-4]
+; CHECK-NEXT: add.w r9, r9, #4
+; CHECK-NEXT: add.w r0, r12, #4
; CHECK-NEXT: le lr, .LBB2_2
; CHECK-NEXT: .LBB2_3: @ %while.end
; CHECK-NEXT: add sp, #16
diff --git a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
index f7b4548..b6657d6 100644
--- a/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-float32regloops.ll
@@ -1573,120 +1573,115 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_df1_f32(ptr nocapture readonly %
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: ldrd r7, r9, [r0]
-; CHECK-NEXT: and r6, r3, #3
-; CHECK-NEXT: ldr r0, [r0, #8]
-; CHECK-NEXT: lsrs r3, r3, #2
-; CHECK-NEXT: @ implicit-def: $r12
-; CHECK-NEXT: str r6, [sp, #4] @ 4-byte Spill
-; CHECK-NEXT: str r3, [sp] @ 4-byte Spill
-; CHECK-NEXT: str r2, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: ldm.w r0, {r7, r9, r11}
+; CHECK-NEXT: and r0, r3, #3
+; CHECK-NEXT: @ implicit-def: $r5
+; CHECK-NEXT: str r0, [sp, #8] @ 4-byte Spill
+; CHECK-NEXT: lsrs r0, r3, #2
+; CHECK-NEXT: str r0, [sp, #4] @ 4-byte Spill
; CHECK-NEXT: b .LBB19_3
; CHECK-NEXT: .LBB19_1: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: mov r3, r8
-; CHECK-NEXT: mov r2, r5
-; CHECK-NEXT: mov r4, r11
-; CHECK-NEXT: mov r8, r10
+; CHECK-NEXT: mov r8, r3
+; CHECK-NEXT: mov r3, r12
+; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r12, r10
; CHECK-NEXT: .LBB19_2: @ %if.end69
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
; CHECK-NEXT: ldr r7, [sp, #12] @ 4-byte Reload
-; CHECK-NEXT: adds r0, #128
-; CHECK-NEXT: strd r2, r4, [r9]
-; CHECK-NEXT: ldr r2, [sp, #8] @ 4-byte Reload
-; CHECK-NEXT: subs r7, #1
-; CHECK-NEXT: strd r3, r8, [r9, #8]
-; CHECK-NEXT: add.w r9, r9, #16
+; CHECK-NEXT: add.w r11, r11, #128
+; CHECK-NEXT: strd r8, r0, [r9]
; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: strd r3, r12, [r9, #8]
+; CHECK-NEXT: add.w r9, r9, #16
+; CHECK-NEXT: subs r7, #1
; CHECK-NEXT: beq.w .LBB19_13
; CHECK-NEXT: .LBB19_3: @ %do.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB19_5 Depth 2
-; CHECK-NEXT: ldrd r5, r11, [r9]
+; CHECK-NEXT: ldr.w r10, [r9, #12]
; CHECK-NEXT: mov r6, r2
-; CHECK-NEXT: ldrd r8, r10, [r9, #8]
-; CHECK-NEXT: ldr r2, [sp] @ 4-byte Reload
+; CHECK-NEXT: ldm.w r9, {r3, r4, r12}
+; CHECK-NEXT: ldr r0, [sp, #4] @ 4-byte Reload
; CHECK-NEXT: str r7, [sp, #12] @ 4-byte Spill
-; CHECK-NEXT: wls lr, r2, .LBB19_6
+; CHECK-NEXT: wls lr, r0, .LBB19_6
; CHECK-NEXT: @ %bb.4: @ %while.body.lr.ph
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: ldr r6, [sp, #8] @ 4-byte Reload
-; CHECK-NEXT: mov r4, r11
-; CHECK-NEXT: mov r3, r5
+; CHECK-NEXT: mov r6, r2
; CHECK-NEXT: .LBB19_5: @ %while.body
; CHECK-NEXT: @ Parent Loop BB19_3 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: ldr r5, [r1, #12]
-; CHECK-NEXT: vldrw.u32 q2, [r0]
-; CHECK-NEXT: vldrw.u32 q6, [r0, #16]
-; CHECK-NEXT: ldm.w r1, {r2, r7, r11}
-; CHECK-NEXT: vmul.f32 q2, q2, r5
-; CHECK-NEXT: vldrw.u32 q7, [r0, #32]
-; CHECK-NEXT: vfma.f32 q2, q6, r11
-; CHECK-NEXT: vldrw.u32 q4, [r0, #48]
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: mov r8, r4
+; CHECK-NEXT: ldrd r4, r3, [r1, #8]
+; CHECK-NEXT: vldrw.u32 q2, [r11]
+; CHECK-NEXT: vldrw.u32 q6, [r11, #16]
+; CHECK-NEXT: ldrd r0, r7, [r1]
+; CHECK-NEXT: vmul.f32 q2, q2, r3
+; CHECK-NEXT: vldrw.u32 q7, [r11, #32]
+; CHECK-NEXT: vfma.f32 q2, q6, r4
+; CHECK-NEXT: vldrw.u32 q4, [r11, #48]
; CHECK-NEXT: vfma.f32 q2, q7, r7
-; CHECK-NEXT: vldrw.u32 q5, [r0, #64]
-; CHECK-NEXT: vfma.f32 q2, q4, r2
-; CHECK-NEXT: vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT: vfma.f32 q2, q5, r3
-; CHECK-NEXT: vldrw.u32 q1, [r0, #96]
-; CHECK-NEXT: vfma.f32 q2, q3, r4
-; CHECK-NEXT: vldrw.u32 q0, [r0, #112]
-; CHECK-NEXT: vfma.f32 q2, q1, r8
+; CHECK-NEXT: vldrw.u32 q5, [r11, #64]
+; CHECK-NEXT: vfma.f32 q2, q4, r0
+; CHECK-NEXT: vldrw.u32 q3, [r11, #80]
+; CHECK-NEXT: vfma.f32 q2, q5, r5
+; CHECK-NEXT: vldrw.u32 q1, [r11, #96]
+; CHECK-NEXT: vfma.f32 q2, q3, r8
+; CHECK-NEXT: vldrw.u32 q0, [r11, #112]
+; CHECK-NEXT: vfma.f32 q2, q1, r12
; CHECK-NEXT: adds r1, #16
; CHECK-NEXT: vfma.f32 q2, q0, r10
-; CHECK-NEXT: mov r4, r11
-; CHECK-NEXT: vmov r10, r8, d5
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: vmov r10, r12, d5
; CHECK-NEXT: vstrb.8 q2, [r6], #16
-; CHECK-NEXT: mov r3, r5
-; CHECK-NEXT: mov r12, r5
; CHECK-NEXT: le lr, .LBB19_5
; CHECK-NEXT: .LBB19_6: @ %while.end
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: ldr r3, [sp, #4] @ 4-byte Reload
-; CHECK-NEXT: cmp r3, #0
+; CHECK-NEXT: ldr r7, [sp, #8] @ 4-byte Reload
+; CHECK-NEXT: cmp r7, #0
; CHECK-NEXT: beq .LBB19_1
; CHECK-NEXT: @ %bb.7: @ %if.then
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: ldrd lr, r4, [r1]
-; CHECK-NEXT: vldrw.u32 q0, [r0]
-; CHECK-NEXT: ldrd r2, r1, [r1, #8]
-; CHECK-NEXT: vldrw.u32 q6, [r0, #16]
-; CHECK-NEXT: vldrw.u32 q7, [r0, #32]
-; CHECK-NEXT: vldrw.u32 q4, [r0, #48]
+; CHECK-NEXT: ldrd lr, r0, [r1]
+; CHECK-NEXT: vldrw.u32 q0, [r11]
+; CHECK-NEXT: ldrd r8, r1, [r1, #8]
+; CHECK-NEXT: vldrw.u32 q6, [r11, #16]
+; CHECK-NEXT: vldrw.u32 q7, [r11, #32]
+; CHECK-NEXT: vldrw.u32 q4, [r11, #48]
; CHECK-NEXT: vmul.f32 q0, q0, r1
-; CHECK-NEXT: vldrw.u32 q5, [r0, #64]
-; CHECK-NEXT: vfma.f32 q0, q6, r2
-; CHECK-NEXT: vldrw.u32 q3, [r0, #80]
-; CHECK-NEXT: vfma.f32 q0, q7, r4
-; CHECK-NEXT: vldrw.u32 q2, [r0, #96]
+; CHECK-NEXT: vldrw.u32 q5, [r11, #64]
+; CHECK-NEXT: vfma.f32 q0, q6, r8
+; CHECK-NEXT: vldrw.u32 q3, [r11, #80]
+; CHECK-NEXT: vfma.f32 q0, q7, r0
+; CHECK-NEXT: vldrw.u32 q2, [r11, #96]
; CHECK-NEXT: vfma.f32 q0, q4, lr
-; CHECK-NEXT: vldrw.u32 q1, [r0, #112]
-; CHECK-NEXT: vfma.f32 q0, q5, r5
-; CHECK-NEXT: cmp r3, #1
-; CHECK-NEXT: vfma.f32 q0, q3, r11
-; CHECK-NEXT: vfma.f32 q0, q2, r8
+; CHECK-NEXT: vldrw.u32 q1, [r11, #112]
+; CHECK-NEXT: vfma.f32 q0, q5, r3
+; CHECK-NEXT: cmp r7, #1
+; CHECK-NEXT: vfma.f32 q0, q3, r4
+; CHECK-NEXT: vfma.f32 q0, q2, r12
; CHECK-NEXT: vfma.f32 q0, q1, r10
-; CHECK-NEXT: vmov r5, s0
+; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: bne .LBB19_9
; CHECK-NEXT: @ %bb.8: @ %if.then58
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: str r5, [r6]
-; CHECK-NEXT: mov r2, lr
-; CHECK-NEXT: mov r4, r12
-; CHECK-NEXT: mov r3, r5
+; CHECK-NEXT: str r4, [r6]
+; CHECK-NEXT: mov r8, lr
+; CHECK-NEXT: mov r0, r5
+; CHECK-NEXT: mov r3, r4
; CHECK-NEXT: b .LBB19_12
; CHECK-NEXT: .LBB19_9: @ %if.else
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: vmov r8, s1
-; CHECK-NEXT: cmp r3, #2
+; CHECK-NEXT: vmov r12, s1
+; CHECK-NEXT: cmp r7, #2
; CHECK-NEXT: vstr s1, [r6, #4]
-; CHECK-NEXT: str r5, [r6]
+; CHECK-NEXT: str r4, [r6]
; CHECK-NEXT: bne .LBB19_11
; CHECK-NEXT: @ %bb.10: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: mov r2, r4
-; CHECK-NEXT: mov r3, r8
-; CHECK-NEXT: mov r4, lr
-; CHECK-NEXT: mov r8, r5
+; CHECK-NEXT: mov r8, r0
+; CHECK-NEXT: mov r3, r12
+; CHECK-NEXT: mov r0, lr
+; CHECK-NEXT: mov r12, r4
; CHECK-NEXT: b .LBB19_12
; CHECK-NEXT: .LBB19_11: @ %if.else64
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
@@ -1694,7 +1689,7 @@ define arm_aapcs_vfpcc void @arm_biquad_cascade_df1_f32(ptr nocapture readonly %
; CHECK-NEXT: vstr s2, [r6, #8]
; CHECK-NEXT: .LBB19_12: @ %if.end69
; CHECK-NEXT: @ in Loop: Header=BB19_3 Depth=1
-; CHECK-NEXT: mov r12, r1
+; CHECK-NEXT: mov r5, r1
; CHECK-NEXT: b .LBB19_2
; CHECK-NEXT: .LBB19_13: @ %do.end
; CHECK-NEXT: add sp, #16
@@ -1901,8 +1896,8 @@ define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocaptur
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
-; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
; CHECK-NEXT: ldrd r6, r12, [r0, #4]
; CHECK-NEXT: lsr.w r8, r3, #1
; CHECK-NEXT: ldrb r0, [r0]
@@ -1910,11 +1905,11 @@ define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocaptur
; CHECK-NEXT: b .LBB20_3
; CHECK-NEXT: .LBB20_1: @ %if.else
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
-; CHECK-NEXT: vmov.f32 s14, s13
-; CHECK-NEXT: vstr s12, [r6]
+; CHECK-NEXT: vmov.f32 s6, s5
+; CHECK-NEXT: vstr s4, [r6]
; CHECK-NEXT: .LBB20_2: @ %if.end
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
-; CHECK-NEXT: vstr s14, [r6, #4]
+; CHECK-NEXT: vstr s6, [r6, #4]
; CHECK-NEXT: add.w r12, r12, #20
; CHECK-NEXT: adds r6, #8
; CHECK-NEXT: subs r0, #1
@@ -1923,41 +1918,39 @@ define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocaptur
; CHECK-NEXT: .LBB20_3: @ %do.body
; CHECK-NEXT: @ =>This Loop Header: Depth=1
; CHECK-NEXT: @ Child Loop BB20_5 Depth 2
-; CHECK-NEXT: vldrw.u32 q2, [r12]
+; CHECK-NEXT: vldrw.u32 q3, [r12]
; CHECK-NEXT: movs r5, #0
-; CHECK-NEXT: vmov q4, q2
+; CHECK-NEXT: vmov q4, q3
; CHECK-NEXT: vshlc q4, r5, #32
-; CHECK-NEXT: vldrw.u32 q1, [r12, #8]
-; CHECK-NEXT: vmov q5, q1
+; CHECK-NEXT: vldrw.u32 q2, [r12, #8]
+; CHECK-NEXT: vmov q5, q2
; CHECK-NEXT: vshlc q5, r5, #32
-; CHECK-NEXT: vldrw.u32 q3, [r6]
-; CHECK-NEXT: vmov.f32 s14, s0
+; CHECK-NEXT: vldrw.u32 q1, [r6]
+; CHECK-NEXT: vmov.f32 s6, s0
; CHECK-NEXT: mov r5, r2
-; CHECK-NEXT: vmov.f32 s15, s0
+; CHECK-NEXT: vmov.f32 s7, s0
; CHECK-NEXT: wls lr, r8, .LBB20_6
; CHECK-NEXT: @ %bb.4: @ %while.body.preheader
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
-; CHECK-NEXT: vmov q6, q3
; CHECK-NEXT: mov r5, r2
; CHECK-NEXT: .LBB20_5: @ %while.body
; CHECK-NEXT: @ Parent Loop BB20_3 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
; CHECK-NEXT: ldrd r7, r4, [r1], #8
-; CHECK-NEXT: vfma.f32 q6, q2, r7
-; CHECK-NEXT: vmov r7, s24
-; CHECK-NEXT: vmov q3, q6
-; CHECK-NEXT: vfma.f32 q3, q1, r7
-; CHECK-NEXT: vstr s24, [r5]
-; CHECK-NEXT: vmov.f32 s15, s0
-; CHECK-NEXT: vfma.f32 q3, q4, r4
-; CHECK-NEXT: vmov r4, s13
-; CHECK-NEXT: vstr s13, [r5, #4]
-; CHECK-NEXT: vfma.f32 q3, q5, r4
+; CHECK-NEXT: vfma.f32 q1, q3, r7
+; CHECK-NEXT: vmov r7, s4
+; CHECK-NEXT: vmov.f32 s2, s4
+; CHECK-NEXT: vfma.f32 q1, q2, r7
+; CHECK-NEXT: vmov.f32 s7, s0
+; CHECK-NEXT: vfma.f32 q1, q4, r4
+; CHECK-NEXT: vmov r4, s5
+; CHECK-NEXT: vstr s5, [r5, #4]
+; CHECK-NEXT: vfma.f32 q1, q5, r4
+; CHECK-NEXT: vmov.f32 s4, s6
+; CHECK-NEXT: vmov.f32 s5, s7
+; CHECK-NEXT: vmov.f32 s6, s0
+; CHECK-NEXT: vstr s2, [r5]
; CHECK-NEXT: adds r5, #8
-; CHECK-NEXT: vmov.f32 s12, s14
-; CHECK-NEXT: vmov.f32 s13, s15
-; CHECK-NEXT: vmov.f32 s14, s0
-; CHECK-NEXT: vmov q6, q3
; CHECK-NEXT: le lr, .LBB20_5
; CHECK-NEXT: .LBB20_6: @ %while.end
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
@@ -1966,14 +1959,14 @@ define void @arm_biquad_cascade_df2T_f32(ptr nocapture readonly %S, ptr nocaptur
; CHECK-NEXT: @ %bb.7: @ %if.then
; CHECK-NEXT: @ in Loop: Header=BB20_3 Depth=1
; CHECK-NEXT: ldr r1, [r1]
-; CHECK-NEXT: vfma.f32 q3, q2, r1
-; CHECK-NEXT: vmov r1, s12
-; CHECK-NEXT: vstr s12, [r5]
-; CHECK-NEXT: vfma.f32 q3, q1, r1
-; CHECK-NEXT: vstr s13, [r6]
+; CHECK-NEXT: vfma.f32 q1, q3, r1
+; CHECK-NEXT: vmov r1, s4
+; CHECK-NEXT: vstr s4, [r5]
+; CHECK-NEXT: vfma.f32 q1, q2, r1
+; CHECK-NEXT: vstr s5, [r6]
; CHECK-NEXT: b .LBB20_2
; CHECK-NEXT: .LBB20_8: @ %do.end
-; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.9:
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
index 0d86f22..b60ee7c 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-increment.ll
@@ -1313,27 +1313,29 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: @ Child Loop BB16_3 Depth 2
; CHECK-NEXT: ldr.w r8, [sp, #56] @ 4-byte Reload
; CHECK-NEXT: vldrw.u32 q5, [sp] @ 16-byte Reload
-; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload
; CHECK-NEXT: vldrw.u32 q7, [sp, #32] @ 16-byte Reload
; CHECK-NEXT: vmov q4, q3
; CHECK-NEXT: .LBB16_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB16_2 Depth=1
; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
-; CHECK-NEXT: vadd.i32 q1, q5, r0
+; CHECK-NEXT: vmov q0, q6
+; CHECK-NEXT: vadd.i32 q6, q5, r0
+; CHECK-NEXT: vmov r7, r3, d13
; CHECK-NEXT: vadd.i32 q2, q4, r0
-; CHECK-NEXT: vmov r7, r3, d3
-; CHECK-NEXT: vadd.i32 q6, q0, lr
; CHECK-NEXT: vmov r5, r6, d5
+; CHECK-NEXT: vmov q1, q7
+; CHECK-NEXT: vmov r4, r10, d12
+; CHECK-NEXT: vadd.i32 q6, q0, lr
; CHECK-NEXT: subs.w r9, r9, #16
-; CHECK-NEXT: vmov r4, r10, d2
-; CHECK-NEXT: vadd.i32 q1, q7, lr
; CHECK-NEXT: vadd.i32 q4, q4, lr
; CHECK-NEXT: vadd.i32 q5, q5, lr
+; CHECK-NEXT: vadd.i32 q7, q7, lr
; CHECK-NEXT: ldrb.w r11, [r3]
; CHECK-NEXT: ldrb r3, [r7]
; CHECK-NEXT: vmov r7, r12, d4
-; CHECK-NEXT: vadd.i32 q2, q7, r0
-; CHECK-NEXT: vadd.i32 q7, q0, r0
+; CHECK-NEXT: vadd.i32 q2, q1, r0
+; CHECK-NEXT: vadd.i32 q1, q0, r0
; CHECK-NEXT: ldrb r5, [r5]
; CHECK-NEXT: ldrb r6, [r6]
; CHECK-NEXT: ldrb r4, [r4]
@@ -1342,7 +1344,7 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: ldrb.w r1, [r12]
; CHECK-NEXT: vmov.8 q0[0], r7
; CHECK-NEXT: vmov.8 q0[1], r1
-; CHECK-NEXT: vmov r1, r7, d15
+; CHECK-NEXT: vmov r1, r7, d3
; CHECK-NEXT: vmov.8 q0[2], r5
; CHECK-NEXT: vmov.8 q0[3], r6
; CHECK-NEXT: vmov.8 q0[4], r4
@@ -1357,8 +1359,7 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: ldrb r3, [r5]
; CHECK-NEXT: ldrb.w r12, [r7]
; CHECK-NEXT: ldrb r5, [r4]
-; CHECK-NEXT: vmov r4, r7, d14
-; CHECK-NEXT: vmov q7, q1
+; CHECK-NEXT: vmov r4, r7, d2
; CHECK-NEXT: ldrb r4, [r4]
; CHECK-NEXT: ldrb r7, [r7]
; CHECK-NEXT: vmov.8 q0[8], r4
@@ -1370,7 +1371,6 @@ define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture reado
; CHECK-NEXT: vmov.8 q0[14], r3
; CHECK-NEXT: vmov.8 q0[15], r12
; CHECK-NEXT: vstrb.8 q0, [r8], #16
-; CHECK-NEXT: vmov q0, q6
; CHECK-NEXT: bne .LBB16_3
; CHECK-NEXT: @ %bb.4: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB16_2 Depth=1
diff --git a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
index eedca2c..c0b2da7 100644
--- a/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-gather-scatter-optimisation.ll
@@ -236,11 +236,11 @@ define arm_aapcs_vfpcc void @push_out_mul_gather_scatter(ptr noalias nocapture r
; CHECK-NEXT: vldrw.u32 q1, [r1]
; CHECK-NEXT: .LBB5_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrw.u32 q2, [r0, q1, uxtw #2]
-; CHECK-NEXT: vadd.i32 q3, q1, q0
+; CHECK-NEXT: vldrw.u32 q3, [r0, q1, uxtw #2]
; CHECK-NEXT: subs r2, #4
-; CHECK-NEXT: vstrw.32 q2, [r0, q1, uxtw #2]
-; CHECK-NEXT: vmov q1, q3
+; CHECK-NEXT: vmov q2, q1
+; CHECK-NEXT: vadd.i32 q1, q1, q0
+; CHECK-NEXT: vstrw.32 q3, [r0, q2, uxtw #2]
; CHECK-NEXT: bne .LBB5_1
; CHECK-NEXT: @ %bb.2: @ %end
; CHECK-NEXT: bx lr
@@ -330,20 +330,20 @@ define arm_aapcs_vfpcc void @non_gatscat_use1(ptr noalias nocapture readonly %da
; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: adr r4, .LCPI7_0
; CHECK-NEXT: mov.w r12, #9
-; CHECK-NEXT: vldrw.u32 q1, [r4]
+; CHECK-NEXT: vldrw.u32 q0, [r4]
; CHECK-NEXT: mov.w lr, #12
; CHECK-NEXT: movs r4, #8
-; CHECK-NEXT: vdup.32 q0, r0
+; CHECK-NEXT: vdup.32 q1, r0
; CHECK-NEXT: .LBB7_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmov q3, q0
-; CHECK-NEXT: vadd.i32 q2, q1, r4
-; CHECK-NEXT: vmla.i32 q3, q1, lr
-; CHECK-NEXT: vmul.i32 q1, q1, r12
-; CHECK-NEXT: vldrw.u32 q4, [q3, #24]
+; CHECK-NEXT: vmov q2, q0
+; CHECK-NEXT: vmov q3, q1
+; CHECK-NEXT: vmla.i32 q3, q2, lr
; CHECK-NEXT: subs r2, #4
-; CHECK-NEXT: vstrw.32 q1, [r3]
-; CHECK-NEXT: vmov q1, q2
+; CHECK-NEXT: vldrw.u32 q4, [q3, #24]
+; CHECK-NEXT: vmul.i32 q2, q2, r12
+; CHECK-NEXT: vadd.i32 q0, q0, r4
+; CHECK-NEXT: vstrw.32 q2, [r3]
; CHECK-NEXT: vstrb.8 q4, [r1], #16
; CHECK-NEXT: bne .LBB7_1
; CHECK-NEXT: @ %bb.2: @ %end
@@ -390,22 +390,22 @@ define arm_aapcs_vfpcc void @non_gatscat_use2(ptr noalias nocapture readonly %da
; CHECK-NEXT: vpush {d8, d9, d10, d11}
; CHECK-NEXT: adr r4, .LCPI8_0
; CHECK-NEXT: movs r5, #18
-; CHECK-NEXT: vldrw.u32 q2, [r4]
+; CHECK-NEXT: vldrw.u32 q0, [r4]
; CHECK-NEXT: mov.w r12, #9
; CHECK-NEXT: mov.w lr, #12
; CHECK-NEXT: movs r4, #8
-; CHECK-NEXT: vdup.32 q0, r0
-; CHECK-NEXT: vdup.32 q1, r5
+; CHECK-NEXT: vdup.32 q1, r0
+; CHECK-NEXT: vdup.32 q2, r5
; CHECK-NEXT: .LBB8_1: @ %vector.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmov q4, q0
-; CHECK-NEXT: vadd.i32 q3, q2, r4
-; CHECK-NEXT: vmla.i32 q4, q2, lr
+; CHECK-NEXT: vmov q3, q0
+; CHECK-NEXT: vmov q4, q1
+; CHECK-NEXT: vmla.i32 q4, q3, lr
; CHECK-NEXT: subs r2, #4
; CHECK-NEXT: vldrw.u32 q5, [q4, #24]
-; CHECK-NEXT: vmov q4, q1
-; CHECK-NEXT: vmla.i32 q4, q2, r12
-; CHECK-NEXT: vmov q2, q3
+; CHECK-NEXT: vmov q4, q2
+; CHECK-NEXT: vmla.i32 q4, q3, r12
+; CHECK-NEXT: vadd.i32 q0, q0, r4
; CHECK-NEXT: vstrb.8 q5, [r1], #16
; CHECK-NEXT: vstrw.32 q4, [r3]
; CHECK-NEXT: bne .LBB8_1
@@ -487,21 +487,21 @@ define dso_local void @arm_mat_mult_q31(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: @ => This Loop Header: Depth=2
; CHECK-NEXT: @ Child Loop BB9_3 Depth 3
; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
-; CHECK-NEXT: vmov q7, q2
+; CHECK-NEXT: vmov q1, q2
; CHECK-NEXT: dls lr, r10
; CHECK-NEXT: vmov.i32 q5, #0x0
-; CHECK-NEXT: vmlas.i32 q7, q0, r7
-; CHECK-NEXT: vmov q6, q4
+; CHECK-NEXT: vmlas.i32 q1, q0, r7
+; CHECK-NEXT: vmov q7, q4
; CHECK-NEXT: .LBB9_3: @ %vector.body
; CHECK-NEXT: @ Parent Loop BB9_1 Depth=1
; CHECK-NEXT: @ Parent Loop BB9_2 Depth=2
; CHECK-NEXT: @ => This Inner Loop Header: Depth=3
-; CHECK-NEXT: vadd.i32 q0, q7, q3
-; CHECK-NEXT: vldrw.u32 q1, [r1, q7, uxtw #2]
-; CHECK-NEXT: vldrw.u32 q7, [q6, #32]!
-; CHECK-NEXT: vmul.i32 q1, q1, q7
-; CHECK-NEXT: vmov q7, q0
-; CHECK-NEXT: vadd.i32 q5, q1, q5
+; CHECK-NEXT: vmov q6, q1
+; CHECK-NEXT: vadd.i32 q1, q1, q3
+; CHECK-NEXT: vldrw.u32 q0, [r1, q6, uxtw #2]
+; CHECK-NEXT: vldrw.u32 q6, [q7, #32]!
+; CHECK-NEXT: vmul.i32 q0, q0, q6
+; CHECK-NEXT: vadd.i32 q5, q0, q5
; CHECK-NEXT: le lr, .LBB9_3
; CHECK-NEXT: @ %bb.4: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB9_2 Depth=2
@@ -702,12 +702,12 @@ define dso_local void @arm_mat_mult_q15(ptr noalias nocapture readonly %A, ptr n
; CHECK-NEXT: @ Parent Loop BB10_5 Depth=1
; CHECK-NEXT: @ Parent Loop BB10_8 Depth=2
; CHECK-NEXT: @ => This Inner Loop Header: Depth=3
-; CHECK-NEXT: vadd.i32 q6, q5, q3
-; CHECK-NEXT: vldrh.s32 q7, [r1, q5, uxtw #1]
-; CHECK-NEXT: vldrh.s32 q5, [r3], #8
-; CHECK-NEXT: vmul.i32 q5, q7, q5
-; CHECK-NEXT: vadd.i32 q4, q5, q4
-; CHECK-NEXT: vmov q5, q6
+; CHECK-NEXT: vmov q6, q5
+; CHECK-NEXT: vadd.i32 q5, q5, q3
+; CHECK-NEXT: vldrh.s32 q7, [r1, q6, uxtw #1]
+; CHECK-NEXT: vldrh.s32 q6, [r3], #8
+; CHECK-NEXT: vmul.i32 q6, q7, q6
+; CHECK-NEXT: vadd.i32 q4, q6, q4
; CHECK-NEXT: le lr, .LBB10_11
; CHECK-NEXT: @ %bb.12: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB10_8 Depth=2
@@ -922,15 +922,15 @@ define hidden arm_aapcs_vfpcc i32 @arm_depthwise_conv_s8(ptr nocapture readonly
; CHECK-NEXT: @ Parent Loop BB11_3 Depth=3
; CHECK-NEXT: @ Parent Loop BB11_4 Depth=4
; CHECK-NEXT: @ => This Inner Loop Header: Depth=5
-; CHECK-NEXT: vldrb.s32 q2, [r0, q5]
-; CHECK-NEXT: vadd.i32 q7, q5, q0
-; CHECK-NEXT: vldrb.s32 q5, [r1, q4]
-; CHECK-NEXT: vadd.i32 q6, q4, q0
-; CHECK-NEXT: vadd.i32 q2, q2, r2
+; CHECK-NEXT: vmov q7, q5
+; CHECK-NEXT: vmov q6, q4
+; CHECK-NEXT: vldrb.s32 q2, [r0, q7]
+; CHECK-NEXT: vldrb.s32 q7, [r1, q6]
; CHECK-NEXT: subs r5, #4
-; CHECK-NEXT: vmlava.u32 r12, q2, q5
-; CHECK-NEXT: vmov q5, q7
-; CHECK-NEXT: vmov q4, q6
+; CHECK-NEXT: vadd.i32 q4, q4, q0
+; CHECK-NEXT: vadd.i32 q2, q2, r2
+; CHECK-NEXT: vadd.i32 q5, q5, q0
+; CHECK-NEXT: vmlava.u32 r12, q2, q7
; CHECK-NEXT: bne .LBB11_5
; CHECK-NEXT: @ %bb.6: @ %middle.block
; CHECK-NEXT: @ in Loop: Header=BB11_4 Depth=4
diff --git a/llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll b/llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll
index 43ed5ee..d6c5cde 100644
--- a/llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pipelineloops.ll
@@ -18,50 +18,50 @@ define void @arm_cmplx_dot_prod_q15(ptr noundef %pSrcA, ptr noundef %pSrcB, i32
; CHECK-NEXT: csel r7, r6, r5, hs
; CHECK-NEXT: add.w lr, r7, #1
; CHECK-NEXT: mov r4, r5
-; CHECK-NEXT: vldrh.u16 q0, [r0], #32
+; CHECK-NEXT: vldrh.u16 q1, [r0], #32
; CHECK-NEXT: movs r7, #0
; CHECK-NEXT: mov r8, r5
+; CHECK-NEXT: vldrh.u16 q2, [r1], #32
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q1, q2
+; CHECK-NEXT: vldrh.u16 q0, [r0, #-16]
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q1, q2
+; CHECK-NEXT: vldrh.u16 q2, [r1, #-16]
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q0, q2
; CHECK-NEXT: vldrh.u16 q1, [r1], #32
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q0, q1
-; CHECK-NEXT: vldrh.u16 q2, [r0, #-16]
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q0, q1
-; CHECK-NEXT: vldrh.u16 q3, [r1, #-16]
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q2, q3
-; CHECK-NEXT: vldrh.u16 q0, [r1], #32
; CHECK-NEXT: sub.w lr, lr, #1
; CHECK-NEXT: cmp.w lr, #0
-; CHECK-NEXT: vldrh.u16 q1, [r0], #32
+; CHECK-NEXT: vldrh.u16 q3, [r0], #32
; CHECK-NEXT: beq .LBB0_3
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: .LBB0_2: @ %while.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q2, q3
-; CHECK-NEXT: vldrh.u16 q3, [r1, #-16]
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q1, q0
-; CHECK-NEXT: vldrh.u16 q2, [r0, #-16]
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q1, q0
-; CHECK-NEXT: vldrh.u16 q1, [r0], #32
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q2, q3
-; CHECK-NEXT: vldrh.u16 q0, [r1], #32
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q0, q2
+; CHECK-NEXT: vldrh.u16 q2, [r1, #-16]
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q3, q1
+; CHECK-NEXT: vldrh.u16 q0, [r0, #-16]
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q3, q1
+; CHECK-NEXT: vldrh.u16 q3, [r0], #32
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q0, q2
+; CHECK-NEXT: vldrh.u16 q1, [r1], #32
; CHECK-NEXT: le lr, .LBB0_2
; CHECK-NEXT: .LBB0_3:
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q2, q3
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q0, q2
; CHECK-NEXT: movs r6, #14
; CHECK-NEXT: and.w r2, r6, r2, lsl #1
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q1, q0
-; CHECK-NEXT: vldrh.u16 q2, [r0, #-16]
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q1, q0
-; CHECK-NEXT: vldrh.u16 q0, [r1, #-16]
-; CHECK-NEXT: vmlaldavax.s16 r8, r5, q2, q0
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q3, q1
+; CHECK-NEXT: vldrh.u16 q0, [r0, #-16]
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q3, q1
+; CHECK-NEXT: vldrh.u16 q1, [r1, #-16]
+; CHECK-NEXT: vmlaldavax.s16 r8, r5, q0, q1
; CHECK-NEXT: vctp.16 r2
-; CHECK-NEXT: vmlsldava.s16 r4, r7, q2, q0
+; CHECK-NEXT: vmlsldava.s16 r4, r7, q0, q1
; CHECK-NEXT: vpst
-; CHECK-NEXT: vldrht.u16 q1, [r0]
+; CHECK-NEXT: vldrht.u16 q2, [r0]
; CHECK-NEXT: cmp r2, #9
; CHECK-NEXT: vpsttt
; CHECK-NEXT: vldrht.u16 q0, [r1]
-; CHECK-NEXT: vmlsldavat.s16 r4, r7, q1, q0
-; CHECK-NEXT: vmlaldavaxt.s16 r8, r5, q1, q0
+; CHECK-NEXT: vmlsldavat.s16 r4, r7, q2, q0
+; CHECK-NEXT: vmlaldavaxt.s16 r8, r5, q2, q0
; CHECK-NEXT: blo .LBB0_10
; CHECK-NEXT: @ %bb.4: @ %do.body.1
; CHECK-NEXT: subs r2, #8
diff --git a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
index 94d5490..6f2a0b2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-shuffle.ll
@@ -439,17 +439,18 @@ define arm_aapcs_vfpcc <8 x i16> @shuffle4step_i16(<32 x i16> %src) {
; CHECK-NEXT: vmovx.f16 s1, s14
; CHECK-NEXT: vmovx.f16 s20, s0
; CHECK-NEXT: vins.f16 s23, s1
-; CHECK-NEXT: vmovx.f16 s1, s2
-; CHECK-NEXT: vins.f16 s20, s1
+; CHECK-NEXT: vmov.f32 s1, s2
+; CHECK-NEXT: vmovx.f16 s2, s2
; CHECK-NEXT: vmovx.f16 s21, s4
-; CHECK-NEXT: vmovx.f16 s1, s6
+; CHECK-NEXT: vins.f16 s20, s2
+; CHECK-NEXT: vmovx.f16 s2, s6
; CHECK-NEXT: vins.f16 s12, s14
; CHECK-NEXT: vins.f16 s8, s10
; CHECK-NEXT: vins.f16 s4, s6
-; CHECK-NEXT: vins.f16 s21, s1
-; CHECK-NEXT: vins.f16 s0, s2
-; CHECK-NEXT: vmov.f32 s1, s4
+; CHECK-NEXT: vins.f16 s21, s2
+; CHECK-NEXT: vins.f16 s0, s1
; CHECK-NEXT: vmov.f32 s2, s8
+; CHECK-NEXT: vmov.f32 s1, s4
; CHECK-NEXT: vmov.f32 s3, s12
; CHECK-NEXT: vadd.i16 q0, q0, q5
; CHECK-NEXT: vadd.i16 q0, q0, q4
diff --git a/llvm/test/CodeGen/Thumb2/mve-vld4.ll b/llvm/test/CodeGen/Thumb2/mve-vld4.ll
index ab41069..ecb1698 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vld4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vld4.ll
@@ -391,17 +391,18 @@ define void @vld4_v8i16_align1(ptr %src, ptr %dst) {
; CHECK-NEXT: vmovx.f16 s1, s2
; CHECK-NEXT: vmovx.f16 s20, s8
; CHECK-NEXT: vins.f16 s23, s1
-; CHECK-NEXT: vmovx.f16 s1, s10
-; CHECK-NEXT: vins.f16 s20, s1
+; CHECK-NEXT: vmov.f32 s1, s10
+; CHECK-NEXT: vmovx.f16 s10, s10
; CHECK-NEXT: vmovx.f16 s21, s12
-; CHECK-NEXT: vmovx.f16 s1, s14
+; CHECK-NEXT: vins.f16 s20, s10
+; CHECK-NEXT: vmovx.f16 s10, s14
; CHECK-NEXT: vins.f16 s0, s2
; CHECK-NEXT: vins.f16 s12, s14
; CHECK-NEXT: vins.f16 s4, s6
-; CHECK-NEXT: vins.f16 s8, s10
-; CHECK-NEXT: vins.f16 s21, s1
-; CHECK-NEXT: vmov.f32 s9, s12
+; CHECK-NEXT: vins.f16 s21, s10
; CHECK-NEXT: vmov.f32 s10, s4
+; CHECK-NEXT: vins.f16 s8, s1
+; CHECK-NEXT: vmov.f32 s9, s12
; CHECK-NEXT: vmov.f32 s11, s0
; CHECK-NEXT: vadd.i16 q0, q2, q5
; CHECK-NEXT: vadd.i16 q0, q0, q4
diff --git a/llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll b/llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll
index 04be18e..6656d44 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vmaxnma-commute.ll
@@ -344,14 +344,14 @@ define void @loop_absmax32_pred_c(ptr %0, i32 %1, ptr nocapture %2) {
; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: dlstp.32 lr, r1
; CHECK-NEXT: .LBB19_1: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrw.u32 q1, [r0], #16
-; CHECK-NEXT: vmaxnma.f32 q1, q0
-; CHECK-NEXT: vmov q0, q1
+; CHECK-NEXT: vmov q1, q0
+; CHECK-NEXT: vldrw.u32 q0, [r0], #16
+; CHECK-NEXT: vmaxnma.f32 q0, q1
; CHECK-NEXT: letp lr, .LBB19_1
; CHECK-NEXT: @ %bb.2:
-; CHECK-NEXT: vldr s0, .LCPI19_0
-; CHECK-NEXT: vmov r0, s0
-; CHECK-NEXT: vmaxnmav.f32 r0, q1
+; CHECK-NEXT: vldr s4, .LCPI19_0
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmaxnmav.f32 r0, q0
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vstr s0, [r2]
; CHECK-NEXT: pop {r7, pc}
@@ -538,14 +538,14 @@ define void @loop_absmax16_pred_c(ptr %0, i32 %1, ptr nocapture %2) {
; CHECK-NEXT: vmov.i32 q0, #0x0
; CHECK-NEXT: dlstp.16 lr, r1
; CHECK-NEXT: .LBB23_1: @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vldrh.u16 q1, [r0], #8
-; CHECK-NEXT: vmaxnma.f16 q1, q0
-; CHECK-NEXT: vmov q0, q1
+; CHECK-NEXT: vmov q1, q0
+; CHECK-NEXT: vldrh.u16 q0, [r0], #8
+; CHECK-NEXT: vmaxnma.f16 q0, q1
; CHECK-NEXT: letp lr, .LBB23_1
; CHECK-NEXT: @ %bb.2:
-; CHECK-NEXT: vldr.16 s0, .LCPI23_0
-; CHECK-NEXT: vmov r0, s0
-; CHECK-NEXT: vmaxnmav.f16 r0, q1
+; CHECK-NEXT: vldr.16 s4, .LCPI23_0
+; CHECK-NEXT: vmov r0, s4
+; CHECK-NEXT: vmaxnmav.f16 r0, q0
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vstr.16 s0, [r2]
; CHECK-NEXT: pop {r7, pc}
diff --git a/llvm/test/CodeGen/Thumb2/mve-vst4.ll b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
index 26ab555..fb5f543 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vst4.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vst4.ll
@@ -1055,18 +1055,18 @@ define void @vst4_v4f16(ptr %src, ptr %dst) {
; CHECK-NEXT: vins.f16 s12, s2
; CHECK-NEXT: vmovx.f16 s2, s3
; CHECK-NEXT: vins.f16 s11, s2
-; CHECK-NEXT: vmovx.f16 s2, s4
-; CHECK-NEXT: vins.f16 s4, s6
-; CHECK-NEXT: vmovx.f16 s6, s6
+; CHECK-NEXT: vmov.f32 s2, s6
+; CHECK-NEXT: vmovx.f16 s6, s4
+; CHECK-NEXT: vins.f16 s4, s2
+; CHECK-NEXT: vmovx.f16 s2, s2
; CHECK-NEXT: vins.f16 s1, s3
-; CHECK-NEXT: vins.f16 s2, s6
-; CHECK-NEXT: vmovx.f16 s6, s7
+; CHECK-NEXT: vins.f16 s6, s2
+; CHECK-NEXT: vmovx.f16 s2, s7
; CHECK-NEXT: vmov.f32 s8, s5
-; CHECK-NEXT: vins.f16 s10, s6
+; CHECK-NEXT: vins.f16 s10, s2
; CHECK-NEXT: vmov.f32 s9, s1
; CHECK-NEXT: vmov.f32 s5, s0
; CHECK-NEXT: vstrh.16 q2, [r1, #16]
-; CHECK-NEXT: vmov.f32 s6, s2
; CHECK-NEXT: vmov.f32 s7, s12
; CHECK-NEXT: vstrh.16 q1, [r1]
; CHECK-NEXT: pop {r4, r5, r6, pc}
diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
index e6fcf56..2929a04 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-vla.ll
@@ -63,8 +63,8 @@ define hidden i32 @f(i32 %n) local_unnamed_addr #0 {
; CHECK-NEXT: subs r0, #4
; CHECK-NEXT: sub.w r3, r4, #16
; CHECK-NEXT: add.w lr, r2, r0, lsr #2
-; CHECK-NEXT: movs r2, #0
; CHECK-NEXT: movs r0, #0
+; CHECK-NEXT: movs r2, #0
; CHECK-NEXT: .LBB0_5: @ %for.body
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldr r5, [r3, #16]!
diff --git a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
index 404db23..5d58ae2 100644
--- a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
+++ b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
@@ -1720,28 +1720,7 @@ for.body: ; preds = %entry, %for.body
}
; CHECK-LABEL: two_floats_two_bytes_same_op:
-; CHECK: loop
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.const 255, 255, 255, 255
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 0, 16, 1, 17, 2, 18, 3, 19, 0, 0, 0, 0, 0, 0, 0, 0
-; CHECK: v128.store64_lane
+; CHECK-NOT: v128.load
define hidden void @two_floats_two_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp22.not = icmp eq i32 %N, 0
@@ -1774,28 +1753,7 @@ for.body: ; preds = %entry, %for.body
}
; CHECK-LABEL: two_floats_two_bytes_vary_op:
-; CHECK: loop
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
-; CHECK: f32x4.add
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.const 255, 255, 255, 255
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
-; CHECK: f32x4.sub
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 0, 16, 1, 17, 2, 18, 3, 19, 0, 0, 0, 0, 0, 0, 0, 0
-; CHECK: v128.store64_lane
+; CHECK-NOT: v128.load
define hidden void @two_floats_two_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp21.not = icmp eq i32 %N, 0
@@ -2347,64 +2305,7 @@ for.body: ; preds = %entry, %for.body
}
; CHECK-LABEL: four_floats_four_bytes_same_op:
-; CHECK: loop
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.const 255, 255, 255, 255
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0, 0, 0
-; CHECK: i8x16.shuffle 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 4, 24, 28, 1, 5, 25, 29, 2, 6, 26, 30, 3, 7, 27, 31
-; CHECK: v128.store
+; CHECK-NOT: v128.load
define hidden void @four_floats_four_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp48.not = icmp eq i32 %N, 0
@@ -2453,64 +2354,7 @@ for.body: ; preds = %entry, %for.body
}
; CHECK-LABEL: four_floats_four_bytes_vary_op:
-; CHECK: loop
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.const 255, 255, 255, 255
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.add
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0, 0, 0
-; CHECK: i8x16.shuffle 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.div
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.sub
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.narrow_i16x8_u
-; CHECK: i8x16.shuffle 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 4, 24, 28, 1, 5, 25, 29, 2, 6, 26, 30, 3, 7, 27, 31
-; CHECK: v128.store
+; CHECK-NOT: v128.load
define hidden void @four_floats_four_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp45.not = icmp eq i32 %N, 0
@@ -2757,62 +2601,7 @@ for.body: ; preds = %entry, %for.body
}
; CHECK-LABEL: four_floats_four_shorts_same_op:
-; CHECK: loop
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.const 65535, 65535, 65535, 65535
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.shuffle 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 4, 5, 12, 13, 20, 21, 28, 29, 6, 7, 14, 15, 22, 23, 30, 31
-; CHECK: v128.store
-; CHECK: i8x16.shuffle 0, 1, 8, 9, 16, 17, 24, 25, 2, 3, 10, 11, 18, 19, 26, 27
-; CHECK: v128.store
+; CHECK-NOT: v128.load
define hidden void @four_floats_four_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp48.not = icmp eq i32 %N, 0
@@ -2861,62 +2650,7 @@ for.body: ; preds = %entry, %for.body
}
; CHECK-LABEL: four_floats_four_shorts_vary_op:
-; CHECK: loop
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.mul
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.const 65535, 65535, 65535, 65535
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.add
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.div
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.shuffle 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
-; CHECK: f32x4.sub
-; CHECK: i32x4.trunc_sat_f32x4_s
-; CHECK: v128.and
-; CHECK: i16x8.narrow_i32x4_u
-; CHECK: i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23
-; CHECK: i8x16.shuffle 4, 5, 12, 13, 20, 21, 28, 29, 6, 7, 14, 15, 22, 23, 30, 31
-; CHECK: v128.store
-; CHECK: i8x16.shuffle 0, 1, 8, 9, 16, 17, 24, 25, 2, 3, 10, 11, 18, 19, 26, 27
-; CHECK: v128.store
+; CHECK-NOT: v128.load
define hidden void @four_floats_four_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp45.not = icmp eq i32 %N, 0
diff --git a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
index 1962dde..f2b4c49 100644
--- a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
+++ b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
@@ -36,10 +36,10 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: Ltmp0:
+; CHECK-NEXT: Ltmp0: ## EH_LABEL
; CHECK-NEXT: ## implicit-def: $ebx
; CHECK-NEXT: calll __Znam
-; CHECK-NEXT: Ltmp1:
+; CHECK-NEXT: Ltmp1: ## EH_LABEL
; CHECK-NEXT: ## %bb.1: ## %bb11
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: movb $1, %al
@@ -58,13 +58,13 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr
; CHECK-NEXT: jne LBB0_9
; CHECK-NEXT: ## %bb.10: ## %bb41
; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1
-; CHECK-NEXT: Ltmp2:
+; CHECK-NEXT: Ltmp2: ## EH_LABEL
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %esi, (%esp)
; CHECK-NEXT: calll _Pjii
-; CHECK-NEXT: Ltmp3:
+; CHECK-NEXT: Ltmp3: ## EH_LABEL
; CHECK-NEXT: ## %bb.11: ## %bb42
; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1
; CHECK-NEXT: xorl %eax, %eax
@@ -126,20 +126,20 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr
; CHECK-NEXT: decl {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
; CHECK-NEXT: jmp LBB0_8
; CHECK-NEXT: LBB0_18: ## %bb43
-; CHECK-NEXT: Ltmp5:
+; CHECK-NEXT: Ltmp5: ## EH_LABEL
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: calll _OnOverFlow
-; CHECK-NEXT: Ltmp6:
+; CHECK-NEXT: Ltmp6: ## EH_LABEL
; CHECK-NEXT: jmp LBB0_3
; CHECK-NEXT: LBB0_2: ## %bb29
-; CHECK-NEXT: Ltmp7:
+; CHECK-NEXT: Ltmp7: ## EH_LABEL
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: calll _OnOverFlow
-; CHECK-NEXT: Ltmp8:
+; CHECK-NEXT: Ltmp8: ## EH_LABEL
; CHECK-NEXT: LBB0_3: ## %bb30
; CHECK-NEXT: ud2
; CHECK-NEXT: LBB0_4: ## %bb20.loopexit
-; CHECK-NEXT: Ltmp4:
+; CHECK-NEXT: Ltmp4: ## EH_LABEL
; CHECK-NEXT: LBB0_9:
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: LBB0_6: ## %bb23
@@ -151,7 +151,7 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
; CHECK-NEXT: LBB0_5: ## %bb20.loopexit.split-lp
-; CHECK-NEXT: Ltmp9:
+; CHECK-NEXT: Ltmp9: ## EH_LABEL
; CHECK-NEXT: jmp LBB0_6
; CHECK-NEXT: Lfunc_end0:
bb:
diff --git a/llvm/test/CodeGen/X86/3addr-16bit.ll b/llvm/test/CodeGen/X86/3addr-16bit.ll
index c9390d9..2b692bf 100644
--- a/llvm/test/CodeGen/X86/3addr-16bit.ll
+++ b/llvm/test/CodeGen/X86/3addr-16bit.ll
@@ -10,27 +10,27 @@ define zeroext i16 @test1(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X64-LABEL: test1:
; X64: ## %bb.0: ## %entry
; X64-NEXT: movl %esi, %eax
-; X64-NEXT: incl %eax
-; X64-NEXT: cmpw %di, %si
+; X64-NEXT: incl %esi
+; X64-NEXT: cmpw %di, %ax
; X64-NEXT: jne LBB0_2
; X64-NEXT: ## %bb.1: ## %bb
; X64-NEXT: pushq %rbx
-; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movzwl %si, %ebx
; X64-NEXT: movl %ebx, %edi
; X64-NEXT: callq _foo
; X64-NEXT: movl %ebx, %eax
; X64-NEXT: popq %rbx
; X64-NEXT: retq
; X64-NEXT: LBB0_2: ## %bb1
-; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %si, %eax
; X64-NEXT: retq
;
; X86-LABEL: test1:
; X86: ## %bb.0: ## %entry
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: incl %eax
; X86-NEXT: cmpw {{[0-9]+}}(%esp), %cx
; X86-NEXT: jne LBB0_2
@@ -63,27 +63,27 @@ define zeroext i16 @test2(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X64-LABEL: test2:
; X64: ## %bb.0: ## %entry
; X64-NEXT: movl %esi, %eax
-; X64-NEXT: decl %eax
-; X64-NEXT: cmpw %di, %si
+; X64-NEXT: decl %esi
+; X64-NEXT: cmpw %di, %ax
; X64-NEXT: jne LBB1_2
; X64-NEXT: ## %bb.1: ## %bb
; X64-NEXT: pushq %rbx
-; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movzwl %si, %ebx
; X64-NEXT: movl %ebx, %edi
; X64-NEXT: callq _foo
; X64-NEXT: movl %ebx, %eax
; X64-NEXT: popq %rbx
; X64-NEXT: retq
; X64-NEXT: LBB1_2: ## %bb1
-; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %si, %eax
; X64-NEXT: retq
;
; X86-LABEL: test2:
; X86: ## %bb.0: ## %entry
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: decl %eax
; X86-NEXT: cmpw {{[0-9]+}}(%esp), %cx
; X86-NEXT: jne LBB1_2
@@ -118,27 +118,27 @@ define zeroext i16 @test3(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X64-LABEL: test3:
; X64: ## %bb.0: ## %entry
; X64-NEXT: movl %esi, %eax
-; X64-NEXT: addl $2, %eax
-; X64-NEXT: cmpw %di, %si
+; X64-NEXT: addl $2, %esi
+; X64-NEXT: cmpw %di, %ax
; X64-NEXT: jne LBB2_2
; X64-NEXT: ## %bb.1: ## %bb
; X64-NEXT: pushq %rbx
-; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movzwl %si, %ebx
; X64-NEXT: movl %ebx, %edi
; X64-NEXT: callq _foo
; X64-NEXT: movl %ebx, %eax
; X64-NEXT: popq %rbx
; X64-NEXT: retq
; X64-NEXT: LBB2_2: ## %bb1
-; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %si, %eax
; X64-NEXT: retq
;
; X86-LABEL: test3:
; X86: ## %bb.0: ## %entry
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: addl $2, %eax
; X86-NEXT: cmpw {{[0-9]+}}(%esp), %cx
; X86-NEXT: jne LBB2_2
@@ -171,19 +171,19 @@ define zeroext i16 @test4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X64-LABEL: test4:
; X64: ## %bb.0: ## %entry
; X64-NEXT: movl %esi, %eax
-; X64-NEXT: addl %edi, %eax
-; X64-NEXT: cmpw %di, %si
+; X64-NEXT: addl %edi, %esi
+; X64-NEXT: cmpw %di, %ax
; X64-NEXT: jne LBB3_2
; X64-NEXT: ## %bb.1: ## %bb
; X64-NEXT: pushq %rbx
-; X64-NEXT: movzwl %ax, %ebx
+; X64-NEXT: movzwl %si, %ebx
; X64-NEXT: movl %ebx, %edi
; X64-NEXT: callq _foo
; X64-NEXT: movl %ebx, %eax
; X64-NEXT: popq %rbx
; X64-NEXT: retq
; X64-NEXT: LBB3_2: ## %bb1
-; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %si, %eax
; X64-NEXT: retq
;
; X86-LABEL: test4:
@@ -191,8 +191,8 @@ define zeroext i16 @test4(i16 zeroext %c, i16 zeroext %k) nounwind ssp {
; X86-NEXT: pushl %esi
; X86-NEXT: subl $8, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movl %edx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: cmpw %cx, %dx
; X86-NEXT: jne LBB3_2
diff --git a/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir b/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir
index 348a290..2445306 100644
--- a/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir
+++ b/llvm/test/CodeGen/X86/StackColoring-dbg-invariance.mir
@@ -55,7 +55,7 @@
!9 = !DILocalVariable(name: "4", scope: !5, file: !1, line: 4, type: !10)
!10 = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned)
!11 = !DILocation(line: 4, column: 1, scope: !5)
- !12 = distinct !DISubprogram(name: "test_2", linkageName: "test_2", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8)
+ !12 = distinct !DISubprogram(name: "test_2", linkageName: "test_2", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !7)
...
---
diff --git a/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll b/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
index b4d40fe..71887e3 100644
--- a/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
+++ b/llvm/test/CodeGen/X86/atomic-rm-bit-test.ll
@@ -2156,15 +2156,17 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_brz(ptr %v, i16 zeroext %c) no
; X64-LABEL: atomic_shl1_mask01_xor_16_gpr_brz:
; X64: # %bb.0: # %entry
; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl %ecx, %edx
; X64-NEXT: andb $15, %cl
-; X64-NEXT: movl $1, %edx
-; X64-NEXT: shll %cl, %edx
+; X64-NEXT: movl $1, %esi
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
+; X64-NEXT: shll %cl, %esi
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: .p2align 4
; X64-NEXT: .LBB34_1: # %atomicrmw.start
; X64-NEXT: # =>This Inner Loop Header: Depth=1
; X64-NEXT: movl %eax, %ecx
-; X64-NEXT: xorl %edx, %ecx
+; X64-NEXT: xorl %esi, %ecx
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: lock cmpxchgw %cx, (%rdi)
; X64-NEXT: # kill: def $ax killed $ax def $eax
@@ -2172,12 +2174,12 @@ define zeroext i16 @atomic_shl1_mask01_xor_16_gpr_brz(ptr %v, i16 zeroext %c) no
; X64-NEXT: # %bb.2: # %atomicrmw.end
; X64-NEXT: movzwl %ax, %ecx
; X64-NEXT: movw $123, %ax
-; X64-NEXT: testl %ecx, %edx
+; X64-NEXT: testl %ecx, %esi
; X64-NEXT: je .LBB34_3
; X64-NEXT: # %bb.4: # %return
; X64-NEXT: retq
; X64-NEXT: .LBB34_3: # %if.then
-; X64-NEXT: movzwl %si, %eax
+; X64-NEXT: movzwl %dx, %eax
; X64-NEXT: movzwl (%rdi,%rax,2), %eax
; X64-NEXT: retq
entry:
@@ -3398,10 +3400,12 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_brnz(ptr %v, i16 zeroext %c) n
; X64-LABEL: atomic_shl1_mask01_and_16_gpr_brnz:
; X64: # %bb.0: # %entry
; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl %ecx, %edx
; X64-NEXT: andb $15, %cl
-; X64-NEXT: movl $1, %edx
-; X64-NEXT: shll %cl, %edx
+; X64-NEXT: movl $1, %esi
+; X64-NEXT: shll %cl, %esi
; X64-NEXT: movl $-2, %r8d
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: roll %cl, %r8d
; X64-NEXT: movzwl (%rdi), %eax
; X64-NEXT: .p2align 4
@@ -3415,10 +3419,10 @@ define zeroext i16 @atomic_shl1_mask01_and_16_gpr_brnz(ptr %v, i16 zeroext %c) n
; X64-NEXT: jne .LBB52_1
; X64-NEXT: # %bb.2: # %atomicrmw.end
; X64-NEXT: movzwl %ax, %eax
-; X64-NEXT: testl %eax, %edx
+; X64-NEXT: testl %eax, %esi
; X64-NEXT: je .LBB52_3
; X64-NEXT: # %bb.4: # %if.then
-; X64-NEXT: movzwl %si, %eax
+; X64-NEXT: movzwl %dx, %eax
; X64-NEXT: movzwl (%rdi,%rax,2), %eax
; X64-NEXT: retq
; X64-NEXT: .LBB52_3:
diff --git a/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll b/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
index 105ee7f..e118f5d 100644
--- a/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
+++ b/llvm/test/CodeGen/X86/atomicrmw-fadd-fp-vector.ll
@@ -46,8 +46,9 @@ define <2 x half> @test_atomicrmw_fadd_v2f16_align4(ptr addrspace(1) %ptr, <2 x
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: lock cmpxchgl %ecx, (%rbx)
; CHECK-NEXT: setne %cl
-; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movl %eax, %edx
; CHECK-NEXT: shrl $16, %eax
+; CHECK-NEXT: pinsrw $0, %edx, %xmm0
; CHECK-NEXT: pinsrw $0, %eax, %xmm1
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: jne .LBB0_1
diff --git a/llvm/test/CodeGen/X86/basic-block-sections-list.ll b/llvm/test/CodeGen/X86/basic-block-sections-list.ll
index 45ef452..d652a540 100644
--- a/llvm/test/CodeGen/X86/basic-block-sections-list.ll
+++ b/llvm/test/CodeGen/X86/basic-block-sections-list.ll
@@ -1,17 +1,13 @@
-;; Check the basic block sections list option.
-;; version 0 profile:
-; RUN: echo '!_Z3foob' > %t1
+;; Check that specifying the function in the basic block sections profile
+;; without any other directives is a no-op.
;;
-;; version 1 profile:
-; RUN: echo 'v1' > %t2
-; RUN: echo 'f _Z3foob' >> %t2
+;; Specify the bb sections profile:
+; RUN: echo 'v1' > %t
+; RUN: echo 'f _Z3foob' >> %t
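+;; (In the v1 format, 'f <name>' selects the function and optional 'c <bb ids>'
+;; lines define basic block clusters; with no 'c' lines the profile changes nothing.)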
;;
-; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections -basic-block-sections=%t1 -unique-basic-block-section-names | FileCheck %s -check-prefix=LINUX-SECTIONS --check-prefix=LINUX-SECTIONS-FUNCTION-SECTION
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t1 -unique-basic-block-section-names | FileCheck %s -check-prefix=LINUX-SECTIONS --check-prefix=LINUX-SECTIONS-NO-FUNCTION-SECTION
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t1 -unique-basic-block-section-names --bbsections-guided-section-prefix=false | FileCheck %s -check-prefix=LINUX-SECTIONS-NO-GUIDED-PREFIX
-; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections -basic-block-sections=%t2 -unique-basic-block-section-names | FileCheck %s -check-prefix=LINUX-SECTIONS --check-prefix=LINUX-SECTIONS-FUNCTION-SECTION
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t2 -unique-basic-block-section-names | FileCheck %s -check-prefix=LINUX-SECTIONS --check-prefix=LINUX-SECTIONS-NO-FUNCTION-SECTION
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t2 -unique-basic-block-section-names --bbsections-guided-section-prefix=false | FileCheck %s -check-prefix=LINUX-SECTIONS-NO-GUIDED-PREFIX
+; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections -basic-block-sections=%t > %t.bbsections
+; RUN: llc < %s -mtriple=x86_64-pc-linux -function-sections > %t.orig
+; RUN: diff -u %t.orig %t.bbsections
define i32 @_Z3foob(i1 zeroext %0) nounwind {
%2 = alloca i32, align 4
@@ -41,45 +37,3 @@ define i32 @_Z3foob(i1 zeroext %0) nounwind {
declare i32 @_Z3barv() #1
declare i32 @_Z3bazv() #1
-
-define i32 @_Z3zipb(i1 zeroext %0) nounwind {
- %2 = alloca i32, align 4
- %3 = alloca i8, align 1
- %4 = zext i1 %0 to i8
- store i8 %4, ptr %3, align 1
- %5 = load i8, ptr %3, align 1
- %6 = trunc i8 %5 to i1
- %7 = zext i1 %6 to i32
- %8 = icmp sgt i32 %7, 0
- br i1 %8, label %9, label %11
-
-9: ; preds = %1
- %10 = call i32 @_Z3barv()
- store i32 %10, ptr %2, align 4
- br label %13
-
-11: ; preds = %1
- %12 = call i32 @_Z3bazv()
- store i32 %12, ptr %2, align 4
- br label %13
-
-13: ; preds = %11, %9
- %14 = load i32, ptr %2, align 4
- ret i32 %14
-}
-
-; LINUX-SECTIONS-NO-GUIDED-PREFIX: .section .text._Z3foob,"ax",@progbits
-; LINUX-SECTIONS: .section .text.hot._Z3foob,"ax",@progbits
-; LINUX-SECTIONS: _Z3foob:
-; LINUX-SECTIONS: .section .text.hot._Z3foob._Z3foob.__part.1,"ax",@progbits
-; LINUX-SECTIONS: _Z3foob.__part.1:
-; LINUX-SECTIONS: .section .text.hot._Z3foob._Z3foob.__part.2,"ax",@progbits
-; LINUX-SECTIONS: _Z3foob.__part.2:
-; LINUX-SECTIONS: .section .text.hot._Z3foob._Z3foob.__part.3,"ax",@progbits
-; LINUX-SECTIONS: _Z3foob.__part.3:
-
-; LINUX-SECTIONS-FUNCTION-SECTION: .section .text._Z3zipb,"ax",@progbits
-; LINUX-SECTIONS-NO-FUNCTION-SECTION-NOT: .section .text{{.*}}._Z3zipb,"ax",@progbits
-; LINUX-SECTIONS: _Z3zipb:
-; LINUX-SECTIONS-NOT: .section .text{{.*}}._Z3zipb.__part.{{[0-9]+}},"ax",@progbits
-; LINUX-SECTIONS-NOT: _Z3zipb.__part.{{[0-9]+}}:
diff --git a/llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll b/llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll
index d481b14..6e0db20 100644
--- a/llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll
+++ b/llvm/test/CodeGen/X86/basic-block-sections-source-drift.ll
@@ -1,6 +1,8 @@
-; RUN: echo "!foo" > %t.order.txt
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t.order.txt | FileCheck --check-prefix=SOURCE-DRIFT %s
-; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t.order.txt -bbsections-detect-source-drift=false | FileCheck --check-prefix=HASH-CHECK-DISABLED %s
+; RUN: echo "v1" > %t
+; RUN: echo "f foo" >> %t
+; RUN: echo "c 0" >> %t
+; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t | FileCheck --check-prefix=SOURCE-DRIFT %s
+; RUN: llc < %s -mtriple=x86_64-pc-linux -basic-block-sections=%t -bbsections-detect-source-drift=false | FileCheck --check-prefix=HASH-CHECK-DISABLED %s
define dso_local i32 @foo(i1 zeroext %0, i1 zeroext %1) !annotation !1 {
br i1 %0, label %5, label %3
diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
index 86d7df0c..fae1ff9 100644
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -216,8 +216,8 @@ define i1 @trunc_v8i16_cmp(<8 x i16> %a0) nounwind {
define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind {
; SSE-LABEL: bitcast_v16i8_to_v2i8:
; SSE: # %bb.0:
-; SSE-NEXT: pmovmskb %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $8, %eax
; SSE-NEXT: addb %cl, %al
; SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -225,8 +225,8 @@ define i8 @bitcast_v16i8_to_v2i8(<16 x i8> %a0) nounwind {
;
; AVX12-LABEL: bitcast_v16i8_to_v2i8:
; AVX12: # %bb.0:
-; AVX12-NEXT: vpmovmskb %xmm0, %ecx
-; AVX12-NEXT: movl %ecx, %eax
+; AVX12-NEXT: vpmovmskb %xmm0, %eax
+; AVX12-NEXT: movl %eax, %ecx
; AVX12-NEXT: shrl $8, %eax
; AVX12-NEXT: addb %cl, %al
; AVX12-NEXT: # kill: def $al killed $al killed $eax
@@ -441,8 +441,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
; SSE-LABEL: bitcast_v16i16_to_v2i8:
; SSE: # %bb.0:
; SSE-NEXT: packsswb %xmm1, %xmm0
-; SSE-NEXT: pmovmskb %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $8, %eax
; SSE-NEXT: addb %cl, %al
; SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -452,8 +452,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovmskb %xmm0, %ecx
-; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %eax
; AVX1-NEXT: addb %cl, %al
; AVX1-NEXT: # kill: def $al killed $al killed $eax
@@ -464,8 +464,8 @@ define i8 @bitcast_v16i16_to_v2i8(<16 x i16> %a0) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vpmovmskb %xmm0, %ecx
-; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: addb %cl, %al
; AVX2-NEXT: # kill: def $al killed $al killed $eax
@@ -762,8 +762,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
-; SSE-NEXT: pmovmskb %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
+; SSE-NEXT: pmovmskb %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: shrl $8, %eax
; SSE-NEXT: addb %cl, %al
; SSE-NEXT: # kill: def $al killed $al killed $eax
@@ -776,8 +776,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpmovmskb %xmm0, %ecx
-; AVX1-NEXT: movl %ecx, %eax
+; AVX1-NEXT: vpmovmskb %xmm0, %eax
+; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: shrl $8, %eax
; AVX1-NEXT: addb %cl, %al
; AVX1-NEXT: # kill: def $al killed $al killed $eax
@@ -793,8 +793,8 @@ define i8 @bitcast_v16i32_to_v2i8(<16 x i32> %a0) nounwind {
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
-; AVX2-NEXT: vpmovmskb %xmm0, %ecx
-; AVX2-NEXT: movl %ecx, %eax
+; AVX2-NEXT: vpmovmskb %xmm0, %eax
+; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: shrl $8, %eax
; AVX2-NEXT: addb %cl, %al
; AVX2-NEXT: # kill: def $al killed $al killed $eax
diff --git a/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll b/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
index 4d41c84..a42a715 100644
--- a/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
+++ b/llvm/test/CodeGen/X86/coalescer-dead-flag-verifier-error.ll
@@ -7,8 +7,8 @@
define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0_(ptr %r) {
; CHECK-LABEL: _ZNK4llvm5APInt21multiplicativeInverseERKS0_:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: jmp .LBB0_1
; CHECK-NEXT: .p2align 4
@@ -68,8 +68,8 @@ _ZNK4llvm5APInt13getActiveBitsEv.exit.i.i: ; preds = %for.body.i.i.i.i.i
define void @_ZNK4llvm5APInt21multiplicativeInverseERKS0__assert(ptr %r) {
; CHECK-LABEL: _ZNK4llvm5APInt21multiplicativeInverseERKS0__assert:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: jmp .LBB1_1
; CHECK-NEXT: .p2align 4
diff --git a/llvm/test/CodeGen/X86/fold-loop-of-urem.ll b/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
index c1beb7c..c9c88f7 100644
--- a/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
+++ b/llvm/test/CodeGen/X86/fold-loop-of-urem.ll
@@ -1031,31 +1031,30 @@ define void @simple_urem_fail_intermediate_inc(i32 %N, i32 %rem_amt) nounwind {
; CHECK-NEXT: testl %edi, %edi
; CHECK-NEXT: je .LBB17_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: movl $1, %r15d
+; CHECK-NEXT: movl $1, %ebp
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB17_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: incl %ebp
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $1, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB17_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB17_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
@@ -1199,32 +1198,31 @@ define void @simple_urem_to_sel_non_zero_start_through_add(i32 %N, i32 %rem_amt_
; CHECK-NEXT: cmpl $3, %edi
; CHECK-NEXT: jb .LBB21_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: orl $16, %ebx
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: movl $7, %r15d
+; CHECK-NEXT: movl $7, %ebp
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB21_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: incl %ebp
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $5, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB21_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB21_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
@@ -1251,32 +1249,31 @@ define void @simple_urem_to_sel_non_zero_start_through_add_fail_missing_nuw(i32
; CHECK-NEXT: cmpl $3, %edi
; CHECK-NEXT: jb .LBB22_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: orl $16, %ebx
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: movl $7, %r15d
+; CHECK-NEXT: movl $7, %ebp
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB22_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: incl %ebp
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $5, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB22_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB22_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
@@ -1303,31 +1300,30 @@ define void @simple_urem_to_sel_non_zero_start_through_add_fail_no_simplify_rem(
; CHECK-NEXT: cmpl $3, %edi
; CHECK-NEXT: jb .LBB23_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: movl $7, %r15d
+; CHECK-NEXT: movl $7, %ebp
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB23_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ebx
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebp, %eax
+; CHECK-NEXT: incl %ebp
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $5, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB23_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB23_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
@@ -1404,32 +1400,31 @@ define void @simple_urem_to_sel_non_zero_start_through_sub_no_simplfy(i32 %N, i3
; CHECK-NEXT: cmpl %edx, %edi
; CHECK-NEXT: jbe .LBB25_4
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: pushq %r15
+; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: pushq %rbx
-; CHECK-NEXT: movl %edx, %r15d
-; CHECK-NEXT: movl %esi, %ebx
+; CHECK-NEXT: movl %edx, %ebx
+; CHECK-NEXT: movl %esi, %ebp
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: negl %r14d
-; CHECK-NEXT: addl $-2, %r15d
+; CHECK-NEXT: addl $-2, %ebx
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: .LBB25_2: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %r15d, %eax
+; CHECK-NEXT: movl %ebx, %eax
; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ebx
+; CHECK-NEXT: divl %ebp
; CHECK-NEXT: movl %edx, %edi
; CHECK-NEXT: callq use.i32@PLT
-; CHECK-NEXT: leal 1(%r14,%r15), %eax
-; CHECK-NEXT: movl %r15d, %ecx
-; CHECK-NEXT: incl %ecx
+; CHECK-NEXT: movl %ebx, %eax
+; CHECK-NEXT: incl %ebx
+; CHECK-NEXT: leal 1(%r14,%rax), %eax
; CHECK-NEXT: cmpl $-2, %eax
-; CHECK-NEXT: movl %ecx, %r15d
; CHECK-NEXT: jne .LBB25_2
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r14
-; CHECK-NEXT: popq %r15
+; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .LBB25_4: # %for.cond.cleanup
; CHECK-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/freeze-binary.ll b/llvm/test/CodeGen/X86/freeze-binary.ll
index e223765..46b2571 100644
--- a/llvm/test/CodeGen/X86/freeze-binary.ll
+++ b/llvm/test/CodeGen/X86/freeze-binary.ll
@@ -490,20 +490,21 @@ define i32 @freeze_ashr_exact(i32 %a0) nounwind {
define i32 @freeze_ashr_exact_extra_use(i32 %a0, ptr %escape) nounwind {
; X86-LABEL: freeze_ashr_exact_extra_use:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: sarl $3, %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl $3, %eax
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: sarl $6, %eax
+; X86-NEXT: movl %edx, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: freeze_ashr_exact_extra_use:
; X64: # %bb.0:
-; X64-NEXT: sarl $3, %edi
-; X64-NEXT: movl %edi, (%rsi)
; X64-NEXT: movl %edi, %eax
+; X64-NEXT: sarl $3, %eax
+; X64-NEXT: movl %eax, %ecx
; X64-NEXT: sarl $6, %eax
+; X64-NEXT: movl %ecx, (%rsi)
; X64-NEXT: retq
%x = ashr exact i32 %a0, 3
%y = freeze i32 %x
@@ -604,20 +605,21 @@ define i32 @freeze_lshr_exact(i32 %a0) nounwind {
define i32 @freeze_lshr_exact_extra_use(i32 %a0, ptr %escape) nounwind {
; X86-LABEL: freeze_lshr_exact_extra_use:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: shrl $3, %ecx
-; X86-NEXT: movl %ecx, (%eax)
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl $3, %eax
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: shrl $5, %eax
+; X86-NEXT: movl %edx, (%ecx)
; X86-NEXT: retl
;
; X64-LABEL: freeze_lshr_exact_extra_use:
; X64: # %bb.0:
-; X64-NEXT: shrl $3, %edi
-; X64-NEXT: movl %edi, (%rsi)
; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shrl $3, %eax
+; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shrl $5, %eax
+; X64-NEXT: movl %ecx, (%rsi)
; X64-NEXT: retq
%x = lshr exact i32 %a0, 3
%y = freeze i32 %x
diff --git a/llvm/test/CodeGen/X86/i128-mul.ll b/llvm/test/CodeGen/X86/i128-mul.ll
index cffd88c..477a0dc 100644
--- a/llvm/test/CodeGen/X86/i128-mul.ll
+++ b/llvm/test/CodeGen/X86/i128-mul.ll
@@ -111,62 +111,63 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X86-NOBMI-NEXT: orl %ecx, %eax
; X86-NOBMI-NEXT: je .LBB1_3
; X86-NOBMI-NEXT: # %bb.1: # %for.body.preheader
-; X86-NOBMI-NEXT: xorl %eax, %eax
-; X86-NOBMI-NEXT: xorl %edx, %edx
+; X86-NOBMI-NEXT: xorl %esi, %esi
; X86-NOBMI-NEXT: xorl %ecx, %ecx
-; X86-NOBMI-NEXT: movl $0, (%esp) # 4-byte Folded Spill
+; X86-NOBMI-NEXT: xorl %edi, %edi
+; X86-NOBMI-NEXT: xorl %ebp, %ebp
; X86-NOBMI-NEXT: .p2align 4
; X86-NOBMI-NEXT: .LBB1_2: # %for.body
; X86-NOBMI-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NOBMI-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NOBMI-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NOBMI-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NOBMI-NEXT: movl (%eax,%ecx,8), %edi
-; X86-NOBMI-NEXT: movl 4(%eax,%ecx,8), %ebx
+; X86-NOBMI-NEXT: movl (%eax,%edi,8), %ebp
+; X86-NOBMI-NEXT: movl 4(%eax,%edi,8), %ebx
; X86-NOBMI-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NOBMI-NEXT: movl %edi, %eax
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NOBMI-NEXT: mull %esi
-; X86-NOBMI-NEXT: movl %edx, %ebp
+; X86-NOBMI-NEXT: movl %ebp, %eax
+; X86-NOBMI-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NOBMI-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NOBMI-NEXT: movl %ebx, %eax
-; X86-NOBMI-NEXT: mull %esi
-; X86-NOBMI-NEXT: movl %edx, %ebx
-; X86-NOBMI-NEXT: movl %eax, %esi
-; X86-NOBMI-NEXT: addl %ebp, %esi
-; X86-NOBMI-NEXT: adcl $0, %ebx
-; X86-NOBMI-NEXT: movl %edi, %eax
+; X86-NOBMI-NEXT: mull {{[0-9]+}}(%esp)
+; X86-NOBMI-NEXT: movl %eax, %ebx
+; X86-NOBMI-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-NOBMI-NEXT: adcl $0, %edx
+; X86-NOBMI-NEXT: movl %edx, (%esp) # 4-byte Spill
+; X86-NOBMI-NEXT: movl %ebp, %eax
; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOBMI-NEXT: mull %edx
-; X86-NOBMI-NEXT: movl %edx, %ebp
-; X86-NOBMI-NEXT: movl %eax, %edi
-; X86-NOBMI-NEXT: addl %esi, %edi
-; X86-NOBMI-NEXT: adcl %ebx, %ebp
-; X86-NOBMI-NEXT: setb %bl
+; X86-NOBMI-NEXT: movl %eax, %ebp
+; X86-NOBMI-NEXT: addl %ebx, %ebp
+; X86-NOBMI-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NOBMI-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
+; X86-NOBMI-NEXT: movl %edx, %ebx
+; X86-NOBMI-NEXT: setb (%esp) # 1-byte Folded Spill
; X86-NOBMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NOBMI-NEXT: mull {{[0-9]+}}(%esp)
-; X86-NOBMI-NEXT: addl %ebp, %eax
-; X86-NOBMI-NEXT: movzbl %bl, %esi
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NOBMI-NEXT: adcl %esi, %edx
-; X86-NOBMI-NEXT: movl %ecx, %ebx
-; X86-NOBMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NOBMI-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-NOBMI-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-NOBMI-NEXT: adcl $0, %eax
-; X86-NOBMI-NEXT: adcl $0, %edx
-; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NOBMI-NEXT: movl %ecx, (%esi,%ebx,8)
-; X86-NOBMI-NEXT: movl %ebx, %ecx
-; X86-NOBMI-NEXT: movl %edi, 4(%esi,%ebx,8)
-; X86-NOBMI-NEXT: addl $1, %ecx
-; X86-NOBMI-NEXT: movl (%esp), %edi # 4-byte Reload
-; X86-NOBMI-NEXT: adcl $0, %edi
-; X86-NOBMI-NEXT: movl %ecx, %esi
-; X86-NOBMI-NEXT: xorl {{[0-9]+}}(%esp), %esi
-; X86-NOBMI-NEXT: movl %edi, (%esp) # 4-byte Spill
-; X86-NOBMI-NEXT: xorl %ebp, %edi
-; X86-NOBMI-NEXT: orl %esi, %edi
+; X86-NOBMI-NEXT: movl %eax, %esi
+; X86-NOBMI-NEXT: addl %ebx, %esi
+; X86-NOBMI-NEXT: movl %ecx, %eax
+; X86-NOBMI-NEXT: movzbl (%esp), %ebx # 1-byte Folded Reload
+; X86-NOBMI-NEXT: movl %edx, %ecx
+; X86-NOBMI-NEXT: adcl %ebx, %ecx
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-NOBMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-NOBMI-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-NOBMI-NEXT: adcl %eax, %ebp
+; X86-NOBMI-NEXT: adcl $0, %esi
+; X86-NOBMI-NEXT: adcl $0, %ecx
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOBMI-NEXT: movl %edx, (%eax,%edi,8)
+; X86-NOBMI-NEXT: movl %ebp, 4(%eax,%edi,8)
+; X86-NOBMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; X86-NOBMI-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOBMI-NEXT: addl $1, %edi
+; X86-NOBMI-NEXT: adcl $0, %ebp
+; X86-NOBMI-NEXT: movl %edi, %eax
+; X86-NOBMI-NEXT: xorl %edx, %eax
+; X86-NOBMI-NEXT: movl %ebp, %edx
+; X86-NOBMI-NEXT: xorl %ebx, %edx
+; X86-NOBMI-NEXT: orl %eax, %edx
; X86-NOBMI-NEXT: jne .LBB1_2
; X86-NOBMI-NEXT: .LBB1_3: # %for.end
; X86-NOBMI-NEXT: xorl %eax, %eax
@@ -184,71 +185,66 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X86-BMI-NEXT: pushl %ebx
; X86-BMI-NEXT: pushl %edi
; X86-BMI-NEXT: pushl %esi
-; X86-BMI-NEXT: subl $20, %esp
+; X86-BMI-NEXT: subl $16, %esp
; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-BMI-NEXT: orl %ecx, %eax
; X86-BMI-NEXT: je .LBB1_3
; X86-BMI-NEXT: # %bb.1: # %for.body.preheader
-; X86-BMI-NEXT: xorl %ecx, %ecx
-; X86-BMI-NEXT: xorl %eax, %eax
+; X86-BMI-NEXT: xorl %esi, %esi
+; X86-BMI-NEXT: xorl %edi, %edi
; X86-BMI-NEXT: xorl %ebx, %ebx
-; X86-BMI-NEXT: xorl %ebp, %ebp
+; X86-BMI-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-BMI-NEXT: .p2align 4
; X86-BMI-NEXT: .LBB1_2: # %for.body
; X86-BMI-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-BMI-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-BMI-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-BMI-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-BMI-NEXT: movl (%eax,%ebx,8), %ecx
-; X86-BMI-NEXT: movl 4(%eax,%ebx,8), %esi
-; X86-BMI-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-BMI-NEXT: movl 4(%eax,%ebx,8), %ebp
+; X86-BMI-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-BMI-NEXT: movl %ecx, %edx
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-BMI-NEXT: mulxl %eax, %edx, %edi
+; X86-BMI-NEXT: mulxl {{[0-9]+}}(%esp), %edx, %eax
+; X86-BMI-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-BMI-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-BMI-NEXT: movl %esi, %edx
-; X86-BMI-NEXT: mulxl %eax, %esi, %eax
-; X86-BMI-NEXT: addl %edi, %esi
-; X86-BMI-NEXT: adcl $0, %eax
+; X86-BMI-NEXT: movl %ebp, %edx
+; X86-BMI-NEXT: mulxl {{[0-9]+}}(%esp), %eax, %ebp
+; X86-BMI-NEXT: addl (%esp), %eax # 4-byte Folded Reload
+; X86-BMI-NEXT: adcl $0, %ebp
; X86-BMI-NEXT: movl %ecx, %edx
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-BMI-NEXT: mulxl %ecx, %edi, %ebp
-; X86-BMI-NEXT: addl %esi, %edi
-; X86-BMI-NEXT: adcl %eax, %ebp
+; X86-BMI-NEXT: mulxl {{[0-9]+}}(%esp), %ecx, %edx
+; X86-BMI-NEXT: addl %eax, %ecx
+; X86-BMI-NEXT: movl %edi, (%esp) # 4-byte Spill
+; X86-BMI-NEXT: movl %esi, %eax
+; X86-BMI-NEXT: adcl %ebp, %edx
+; X86-BMI-NEXT: movl %edx, %ebp
; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-BMI-NEXT: mulxl %ecx, %ecx, %eax
+; X86-BMI-NEXT: mulxl {{[0-9]+}}(%esp), %esi, %edi
; X86-BMI-NEXT: setb %dl
-; X86-BMI-NEXT: addl %ebp, %ecx
-; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-BMI-NEXT: addl %ebp, %esi
; X86-BMI-NEXT: movzbl %dl, %edx
-; X86-BMI-NEXT: adcl %edx, %eax
-; X86-BMI-NEXT: movl %eax, %edx
-; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-BMI-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; X86-BMI-NEXT: adcl (%esp), %edi # 4-byte Folded Reload
-; X86-BMI-NEXT: adcl $0, %ecx
-; X86-BMI-NEXT: adcl $0, %edx
-; X86-BMI-NEXT: movl %edx, (%esp) # 4-byte Spill
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-BMI-NEXT: movl %eax, (%edx,%ebx,8)
-; X86-BMI-NEXT: movl %edi, 4(%edx,%ebx,8)
-; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-BMI-NEXT: adcl %edx, %edi
+; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-BMI-NEXT: addl %eax, %edx
+; X86-BMI-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
+; X86-BMI-NEXT: adcl $0, %esi
+; X86-BMI-NEXT: adcl $0, %edi
+; X86-BMI-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-BMI-NEXT: movl %edx, (%eax,%ebx,8)
+; X86-BMI-NEXT: movl %ecx, 4(%eax,%ebx,8)
; X86-BMI-NEXT: addl $1, %ebx
-; X86-BMI-NEXT: adcl $0, %ebp
-; X86-BMI-NEXT: movl %ebx, %edx
-; X86-BMI-NEXT: xorl %esi, %edx
-; X86-BMI-NEXT: movl %ebp, %esi
-; X86-BMI-NEXT: xorl %edi, %esi
-; X86-BMI-NEXT: orl %edx, %esi
-; X86-BMI-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-BMI-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-BMI-NEXT: adcl $0, %ecx
+; X86-BMI-NEXT: movl %ebx, %eax
+; X86-BMI-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-BMI-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-BMI-NEXT: xorl %ebp, %ecx
+; X86-BMI-NEXT: orl %eax, %ecx
; X86-BMI-NEXT: jne .LBB1_2
; X86-BMI-NEXT: .LBB1_3: # %for.end
; X86-BMI-NEXT: xorl %eax, %eax
; X86-BMI-NEXT: xorl %edx, %edx
-; X86-BMI-NEXT: addl $20, %esp
+; X86-BMI-NEXT: addl $16, %esp
; X86-BMI-NEXT: popl %esi
; X86-BMI-NEXT: popl %edi
; X86-BMI-NEXT: popl %ebx
@@ -261,11 +257,12 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X64-NOBMI-NEXT: je .LBB1_3
; X64-NOBMI-NEXT: # %bb.1: # %for.body.preheader
; X64-NOBMI-NEXT: movq %rdx, %r8
-; X64-NOBMI-NEXT: xorl %r10d, %r10d
+; X64-NOBMI-NEXT: xorl %edx, %edx
; X64-NOBMI-NEXT: xorl %r9d, %r9d
; X64-NOBMI-NEXT: .p2align 4
; X64-NOBMI-NEXT: .LBB1_2: # %for.body
; X64-NOBMI-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-NOBMI-NEXT: movq %rdx, %r10
; X64-NOBMI-NEXT: movq %rcx, %rax
; X64-NOBMI-NEXT: mulq (%r8,%r9,8)
; X64-NOBMI-NEXT: addq %r10, %rax
@@ -273,7 +270,6 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X64-NOBMI-NEXT: movq %rax, (%rsi,%r9,8)
; X64-NOBMI-NEXT: incq %r9
; X64-NOBMI-NEXT: cmpq %r9, %rdi
-; X64-NOBMI-NEXT: movq %rdx, %r10
; X64-NOBMI-NEXT: jne .LBB1_2
; X64-NOBMI-NEXT: .LBB1_3: # %for.end
; X64-NOBMI-NEXT: xorl %eax, %eax
@@ -285,11 +281,12 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X64-BMI-NEXT: je .LBB1_3
; X64-BMI-NEXT: # %bb.1: # %for.body.preheader
; X64-BMI-NEXT: movq %rdx, %rax
-; X64-BMI-NEXT: xorl %r9d, %r9d
+; X64-BMI-NEXT: xorl %edx, %edx
; X64-BMI-NEXT: xorl %r8d, %r8d
; X64-BMI-NEXT: .p2align 4
; X64-BMI-NEXT: .LBB1_2: # %for.body
; X64-BMI-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-BMI-NEXT: movq %rdx, %r9
; X64-BMI-NEXT: movq %rcx, %rdx
; X64-BMI-NEXT: mulxq (%rax,%r8,8), %r10, %rdx
; X64-BMI-NEXT: addq %r9, %r10
@@ -297,7 +294,6 @@ define i64 @mul1(i64 %n, ptr nocapture %z, ptr nocapture %x, i64 %y) nounwind {
; X64-BMI-NEXT: movq %r10, (%rsi,%r8,8)
; X64-BMI-NEXT: incq %r8
; X64-BMI-NEXT: cmpq %r8, %rdi
-; X64-BMI-NEXT: movq %rdx, %r9
; X64-BMI-NEXT: jne .LBB1_2
; X64-BMI-NEXT: .LBB1_3: # %for.end
; X64-BMI-NEXT: xorl %eax, %eax
diff --git a/llvm/test/CodeGen/X86/icmp-abs-C.ll b/llvm/test/CodeGen/X86/icmp-abs-C.ll
index 53b70fa..c98889b 100644
--- a/llvm/test/CodeGen/X86/icmp-abs-C.ll
+++ b/llvm/test/CodeGen/X86/icmp-abs-C.ll
@@ -161,22 +161,22 @@ define i16 @ne_and_with_dom_abs(i16 %x) nounwind {
; X86-LABEL: ne_and_with_dom_abs:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movswl %cx, %eax
-; X86-NEXT: sarl $15, %eax
-; X86-NEXT: xorl %eax, %ecx
-; X86-NEXT: subl %eax, %ecx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movswl %ax, %ecx
+; X86-NEXT: sarl $15, %ecx
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: xorl $12312, %eax # imm = 0x3018
; X86-NEXT: movzwl %ax, %esi
-; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: cmpw $64, %cx
-; X86-NEXT: setne %cl
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: cmpw $64, %dx
+; X86-NEXT: setne %dl
; X86-NEXT: cmpl $2345, %esi # imm = 0x929
; X86-NEXT: jae .LBB3_2
; X86-NEXT: # %bb.1:
-; X86-NEXT: movb %cl, %dl
-; X86-NEXT: movl %edx, %eax
+; X86-NEXT: movb %dl, %cl
+; X86-NEXT: movl %ecx, %eax
; X86-NEXT: .LBB3_2:
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: popl %esi
diff --git a/llvm/test/CodeGen/X86/ldexp-avx512.ll b/llvm/test/CodeGen/X86/ldexp-avx512.ll
index ea93a91..21491bc 100644
--- a/llvm/test/CodeGen/X86/ldexp-avx512.ll
+++ b/llvm/test/CodeGen/X86/ldexp-avx512.ll
@@ -47,6 +47,187 @@ entry:
}
declare fp128 @ldexpl(fp128, i32) memory(none)
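+
+; Each f16 lane is extended to f32 with vcvtph2ps, ldexpf is called per
+; element, and the results are truncated back with vcvtps2ph and repacked.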
+define <8 x half> @test_ldexp_8xhalf(<8 x half> %x, <8 x i16> %exp) nounwind {
+; AVX512-LABEL: test_ldexp_8xhalf:
+; AVX512: # %bb.0:
+; AVX512-NEXT: subq $88, %rsp
+; AVX512-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm2, %xmm0
+; AVX512-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX512-NEXT: addq $88, %rsp
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: test_ldexp_8xhalf:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: subq $88, %rsp
+; AVX512VL-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm2 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm2, %xmm0
+; AVX512VL-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,0]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vmovd %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX512VL-NEXT: addq $88, %rsp
+; AVX512VL-NEXT: retq
+ %r = call <8 x half> @llvm.ldexp.v8f16.v8i16(<8 x half> %x, <8 x i16> %exp)
+ ret <8 x half> %r
+}
+declare <8 x half> @llvm.ldexp.v8f16.v8i16(<8 x half>, <8 x i16>)
+
define <4 x float> @test_ldexp_4xfloat(<4 x float> %x, <4 x i32> %exp) nounwind {
; CHECK-LABEL: test_ldexp_4xfloat:
; CHECK: # %bb.0:
@@ -109,6 +290,381 @@ define <2 x double> @test_ldexp_2xdouble(<2 x double> %x, <2 x i32> %exp) nounwi
}
declare <2 x double> @llvm.ldexp.v2f64.v2i32(<2 x double>, <2 x i32>)
+define <16 x half> @test_ldexp_16xhalf(<16 x half> %x, <16 x i16> %exp) nounwind {
+; AVX512-LABEL: test_ldexp_16xhalf:
+; AVX512: # %bb.0:
+; AVX512-NEXT: subq $168, %rsp
+; AVX512-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
+; AVX512-NEXT: addq $168, %rsp
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: test_ldexp_16xhalf:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: subq $168, %rsp
+; AVX512VL-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512VL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,0]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512VL-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512VL-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512VL-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,0]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512VL-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512VL-NEXT: vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vmovd %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512VL-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512VL-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512VL-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
+; AVX512VL-NEXT: vmovd %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
+; AVX512VL-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
+; AVX512VL-NEXT: addq $168, %rsp
+; AVX512VL-NEXT: retq
+ %r = call <16 x half> @llvm.ldexp.v16f16.v16i16(<16 x half> %x, <16 x i16> %exp)
+ ret <16 x half> %r
+}
+declare <16 x half> @llvm.ldexp.v16f16.v16i16(<16 x half>, <16 x i16>)
+
define <8 x float> @test_ldexp_8xfloat(<8 x float> %x, <8 x i32> %exp) nounwind {
; CHECK-LABEL: test_ldexp_8xfloat:
; CHECK: # %bb.0:
@@ -230,6 +786,735 @@ define <4 x double> @test_ldexp_4xdouble(<4 x double> %x, <4 x i32> %exp) nounwi
}
declare <4 x double> @llvm.ldexp.v4f64.v4i32(<4 x double>, <4 x i32>)
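+; The <32 x half> case scalarizes to 32 ldexpf libcalls: the zmm operands are
+; spilled, 128-bit quarters are extracted with vextracti32x4/vextracti128, and
+; the results are rebuilt with vinserti128/vinserti64x4 plus a final
+; vpunpcklqdq at zmm width.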
+define <32 x half> @test_ldexp_32xhalf(<32 x half> %x, <32 x i16> %exp) nounwind {
+; AVX512-LABEL: test_ldexp_32xhalf:
+; AVX512: # %bb.0:
+; AVX512-NEXT: subq $360, %rsp # imm = 0x168
+; AVX512-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1
+; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm1
+; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,0]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vcvtph2ps (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vpsrlq $48, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512-NEXT: vmovd %xmm1, %eax
+; AVX512-NEXT: movswl %ax, %edi
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: callq ldexpf@PLT
+; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
+; AVX512-NEXT: # zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
+; AVX512-NEXT: addq $360, %rsp # imm = 0x168
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: test_ldexp_32xhalf:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: subq $360, %rsp # imm = 0x168
+; AVX512VL-NEXT: vmovdqu64 %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512VL-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512VL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vextracti32x4 $3, %zmm1, %xmm1
+; AVX512VL-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,0]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512VL-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vextracti32x4 $2, %zmm1, %xmm1
+; AVX512VL-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,0]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vextracti128 $1, %ymm1, %xmm1
+; AVX512VL-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,0]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[3,3,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
+; AVX512VL-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,0]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512VL-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
+; AVX512VL-NEXT: vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vmovd %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrlq $48, (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrld $16, (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vcvtph2ps (%rsp), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vmovd %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; AVX512VL-NEXT: vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; AVX512VL-NEXT: vmovd %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vpsrlq $48, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = mem[1,1,3,3]
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
+; AVX512VL-NEXT: vpsrld $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vcvtph2ps %xmm0, %xmm0
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX512VL-NEXT: vcvtph2ps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vmovdqu64 {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload
+; AVX512VL-NEXT: vmovd %xmm1, %eax
+; AVX512VL-NEXT: movswl %ax, %edi
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: callq ldexpf@PLT
+; AVX512VL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
+; AVX512VL-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; AVX512VL-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
+; AVX512VL-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX512VL-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 32-byte Folded Reload
+; AVX512VL-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm0 # 64-byte Folded Reload
+; AVX512VL-NEXT: # zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6]
+; AVX512VL-NEXT: addq $360, %rsp # imm = 0x168
+; AVX512VL-NEXT: retq
+ %r = call <32 x half> @llvm.ldexp.v32f16.v32i16(<32 x half> %x, <32 x i16> %exp)
+ ret <32 x half> %r
+}
+declare <32 x half> @llvm.ldexp.v32f16.v32i16(<32 x half>, <32 x i16>)
+
define <16 x float> @test_ldexp_16xfloat(<16 x float> %x, <16 x i32> %exp) nounwind {
; CHECK-LABEL: test_ldexp_16xfloat:
; CHECK: # %bb.0:
@@ -462,6 +1747,3 @@ define <8 x double> @test_ldexp_8xdouble(<8 x double> %x, <8 x i32> %exp) nounwi
}
declare <8 x double> @llvm.ldexp.v8f64.v8i32(<8 x double>, <8 x i32>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; AVX512: {{.*}}
-; AVX512VL: {{.*}}
diff --git a/llvm/test/CodeGen/X86/llvm.sincospi.ll b/llvm/test/CodeGen/X86/llvm.sincospi.ll
new file mode 100644
index 0000000..5546c66
--- /dev/null
+++ b/llvm/test/CodeGen/X86/llvm.sincospi.ll
@@ -0,0 +1,233 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=x86_64-apple-macosx10.9 < %s | FileCheck %s
+
+define { half, half } @test_sincospi_f16(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: pextrw $0, %xmm0, %eax
+; CHECK-NEXT: movzwl %ax, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm1
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: retq
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ ret { half, half } %result
+}
+
+define half @test_sincospi_f16_only_use_sin(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16_only_use_sin:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: pextrw $0, %xmm0, %eax
+; CHECK-NEXT: movzwl %ax, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: movq %rsp, %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ %result.0 = extractvalue { half, half } %result, 0
+ ret half %result.0
+}
+
+define half @test_sincospi_f16_only_use_cos(half %a) #0 {
+; CHECK-LABEL: test_sincospi_f16_only_use_cos:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: pextrw $0, %xmm0, %eax
+; CHECK-NEXT: movzwl %ax, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+ %result = call { half, half } @llvm.sincospi.f16(half %a)
+ %result.1 = extractvalue { half, half } %result, 1
+ ret half %result.1
+}
+
+define { <2 x half>, <2 x half> } @test_sincospi_v2f16(<2 x half> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f16:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: subq $64, %rsp
+; CHECK-NEXT: pextrw $0, %xmm0, %ebx
+; CHECK-NEXT: psrld $16, %xmm0
+; CHECK-NEXT: pextrw $0, %xmm0, %eax
+; CHECK-NEXT: movzwl %ax, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movzwl %bx, %edi
+; CHECK-NEXT: callq ___extendhfsf2
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: callq ___truncsfhf2
+; CHECK-NEXT: ## kill: def $ax killed $ax def $eax
+; CHECK-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
+; CHECK-NEXT: ## xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
+; CHECK-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; CHECK-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
+; CHECK-NEXT: ## xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
+; CHECK-NEXT: addq $64, %rsp
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+ %result = call { <2 x half>, <2 x half> } @llvm.sincospi.v2f16(<2 x half> %a)
+ ret { <2 x half>, <2 x half> } %result
+}
+
+define { float, float } @test_sincospi_f32(float %a) #0 {
+; CHECK-LABEL: test_sincospi_f32:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: retq
+ %result = call { float, float } @llvm.sincospi.f32(float %a)
+ ret { float, float } %result
+}
+
+define { <2 x float>, <2 x float> } @test_sincospi_v2f32(<2 x float> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f32:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: retq
+ %result = call { <2 x float>, <2 x float> } @llvm.sincospi.v2f32(<2 x float> %a)
+ ret { <2 x float>, <2 x float> } %result
+}
+
+define { <3 x float>, <3 x float> } @test_sincospi_v3f32(<3 x float> %a) #0 {
+; CHECK-LABEL: test_sincospi_v3f32:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospif
+; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
+ %result = call { <3 x float>, <3 x float> } @llvm.sincospi.v3f32(<3 x float> %a)
+ ret { <3 x float>, <3 x float> } %result
+}
+
+define { double, double } @test_sincospi_f64(double %a) #0 {
+; CHECK-LABEL: test_sincospi_f64:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $24, %rsp
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospi
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: retq
+ %result = call { double, double } @llvm.sincospi.f64(double %a)
+ ret { double, double } %result
+}
+
+define { <2 x double>, <2 x double> } @test_sincospi_v2f64(<2 x double> %a) #0 {
+; CHECK-LABEL: test_sincospi_v2f64:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: subq $56, %rsp
+; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rsi
+; CHECK-NEXT: callq ___sincospi
+; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; CHECK-NEXT: leaq {{[0-9]+}}(%rsp), %rdi
+; CHECK-NEXT: movq %rsp, %rsi
+; CHECK-NEXT: callq ___sincospi
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: movhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
+; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: movhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
+; CHECK-NEXT: addq $56, %rsp
+; CHECK-NEXT: retq
+ %result = call { <2 x double>, <2 x double> } @llvm.sincospi.v2f64(<2 x double> %a)
+ ret { <2 x double>, <2 x double> } %result
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index caec02e..2f691e7 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -255,9 +255,9 @@ define <8 x i32> @test7(ptr %base, <8 x i32> %ind, i8 %mask) {
; X64-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-KNL-NEXT: kmovw %k1, %k2
; X64-KNL-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k2}
-; X64-KNL-NEXT: vmovdqa64 %zmm1, %zmm2
-; X64-KNL-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm2 {%k1}
-; X64-KNL-NEXT: vpaddd %ymm2, %ymm1, %ymm0
+; X64-KNL-NEXT: vmovdqa %ymm1, %ymm2
+; X64-KNL-NEXT: vpgatherdd (%rdi,%zmm0,4), %zmm1 {%k1}
+; X64-KNL-NEXT: vpaddd %ymm1, %ymm2, %ymm0
; X64-KNL-NEXT: retq
;
; X86-KNL-LABEL: test7:
@@ -271,9 +271,9 @@ define <8 x i32> @test7(ptr %base, <8 x i32> %ind, i8 %mask) {
; X86-KNL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-KNL-NEXT: kmovw %k1, %k2
; X86-KNL-NEXT: vpgatherdd (%eax,%zmm0,4), %zmm1 {%k2}
-; X86-KNL-NEXT: vmovdqa64 %zmm1, %zmm2
-; X86-KNL-NEXT: vpgatherdd (%eax,%zmm0,4), %zmm2 {%k1}
-; X86-KNL-NEXT: vpaddd %ymm2, %ymm1, %ymm0
+; X86-KNL-NEXT: vmovdqa %ymm1, %ymm2
+; X86-KNL-NEXT: vpgatherdd (%eax,%zmm0,4), %zmm1 {%k1}
+; X86-KNL-NEXT: vpaddd %ymm1, %ymm2, %ymm0
; X86-KNL-NEXT: retl
;
; X64-SKX-LABEL: test7:
diff --git a/llvm/test/CodeGen/X86/midpoint-int.ll b/llvm/test/CodeGen/X86/midpoint-int.ll
index a75d42e..c058e37 100644
--- a/llvm/test/CodeGen/X86/midpoint-int.ll
+++ b/llvm/test/CodeGen/X86/midpoint-int.ll
@@ -658,9 +658,9 @@ define i16 @scalar_i16_signed_reg_reg(i16 %a1, i16 %a2) nounwind {
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setle %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
@@ -710,9 +710,9 @@ define i16 @scalar_i16_unsigned_reg_reg(i16 %a1, i16 %a2) nounwind {
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setbe %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
@@ -765,9 +765,9 @@ define i16 @scalar_i16_signed_mem_reg(ptr %a1_addr, i16 %a2) nounwind {
; X86-NEXT: pushl %ebx
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movzwl (%eax), %ecx
+; X86-NEXT: movzwl (%eax), %eax
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setle %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
@@ -817,11 +817,11 @@ define i16 @scalar_i16_signed_reg_mem(i16 %a1, ptr %a2_addr) nounwind {
; X86-LABEL: scalar_i16_signed_reg_mem:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setle %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
@@ -871,12 +871,12 @@ define i16 @scalar_i16_signed_mem_mem(ptr %a1_addr, ptr %a2_addr) nounwind {
; X86-LABEL: scalar_i16_signed_mem_mem:
; X86: # %bb.0:
; X86-NEXT: pushl %ebx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movzwl (%ecx), %ecx
-; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: movzwl (%ecx), %edx
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: movl %eax, %ecx
; X86-NEXT: subw %dx, %ax
; X86-NEXT: setle %bl
; X86-NEXT: leal -1(%ebx,%ebx), %edx
diff --git a/llvm/test/CodeGen/X86/mmx-arith.ll b/llvm/test/CodeGen/X86/mmx-arith.ll
index 73d459b..8f97d26 100644
--- a/llvm/test/CodeGen/X86/mmx-arith.ll
+++ b/llvm/test/CodeGen/X86/mmx-arith.ll
@@ -403,11 +403,11 @@ define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: xorl %eax, %eax
; X86-NEXT: testl %ecx, %ecx
; X86-NEXT: je .LBB3_1
; X86-NEXT: # %bb.2: # %bb26.preheader
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB3_3: # %bb26
@@ -427,7 +427,6 @@ define <1 x i64> @test3(ptr %a, ptr %b, i32 %count) nounwind {
; X86-NEXT: jb .LBB3_3
; X86-NEXT: jmp .LBB3_4
; X86-NEXT: .LBB3_1:
-; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: .LBB3_4: # %bb31
; X86-NEXT: popl %esi
diff --git a/llvm/test/CodeGen/X86/mul-constant-i16.ll b/llvm/test/CodeGen/X86/mul-constant-i16.ll
index b1aa789..a663f6a 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i16.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i16.ll
@@ -715,8 +715,8 @@ define i16 @test_mul_by_66(i16 %x) {
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
-; X64-NEXT: shll $6, %eax
-; X64-NEXT: leal (%rax,%rdi,2), %eax
+; X64-NEXT: shll $6, %edi
+; X64-NEXT: leal (%rdi,%rax,2), %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 66
@@ -757,8 +757,8 @@ define i16 @test_mul_by_520(i16 %x) {
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
-; X64-NEXT: shll $9, %eax
-; X64-NEXT: leal (%rax,%rdi,8), %eax
+; X64-NEXT: shll $9, %edi
+; X64-NEXT: leal (%rdi,%rax,8), %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%mul = mul nsw i16 %x, 520
diff --git a/llvm/test/CodeGen/X86/mul-constant-i32.ll b/llvm/test/CodeGen/X86/mul-constant-i32.ll
index 79889b9..4129b44 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i32.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i32.ll
@@ -1155,16 +1155,16 @@ define i32 @test_mul_by_66(i32 %x) {
; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: movl %edi, %eax
-; X64-HSW-NEXT: shll $6, %eax
-; X64-HSW-NEXT: leal (%rax,%rdi,2), %eax
+; X64-HSW-NEXT: shll $6, %edi
+; X64-HSW-NEXT: leal (%rdi,%rax,2), %eax
; X64-HSW-NEXT: retq
;
; X64-JAG-LABEL: test_mul_by_66:
; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: movl %edi, %eax
-; X64-JAG-NEXT: shll $6, %eax
-; X64-JAG-NEXT: leal (%rax,%rdi,2), %eax
+; X64-JAG-NEXT: shll $6, %edi
+; X64-JAG-NEXT: leal (%rdi,%rax,2), %eax
; X64-JAG-NEXT: retq
;
; X86-NOOPT-LABEL: test_mul_by_66:
@@ -1241,16 +1241,16 @@ define i32 @test_mul_by_520(i32 %x) {
; X64-HSW: # %bb.0:
; X64-HSW-NEXT: # kill: def $edi killed $edi def $rdi
; X64-HSW-NEXT: movl %edi, %eax
-; X64-HSW-NEXT: shll $9, %eax
-; X64-HSW-NEXT: leal (%rax,%rdi,8), %eax
+; X64-HSW-NEXT: shll $9, %edi
+; X64-HSW-NEXT: leal (%rdi,%rax,8), %eax
; X64-HSW-NEXT: retq
;
; X64-JAG-LABEL: test_mul_by_520:
; X64-JAG: # %bb.0:
; X64-JAG-NEXT: # kill: def $edi killed $edi def $rdi
; X64-JAG-NEXT: movl %edi, %eax
-; X64-JAG-NEXT: shll $9, %eax
-; X64-JAG-NEXT: leal (%rax,%rdi,8), %eax
+; X64-JAG-NEXT: shll $9, %edi
+; X64-JAG-NEXT: leal (%rdi,%rax,8), %eax
; X64-JAG-NEXT: retq
;
; X86-NOOPT-LABEL: test_mul_by_520:
diff --git a/llvm/test/CodeGen/X86/mul-constant-i8.ll b/llvm/test/CodeGen/X86/mul-constant-i8.ll
index a4fa1ee..b488653 100644
--- a/llvm/test/CodeGen/X86/mul-constant-i8.ll
+++ b/llvm/test/CodeGen/X86/mul-constant-i8.ll
@@ -425,8 +425,8 @@ define i8 @test_mul_by_66(i8 %x) {
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: movl %edi, %eax
-; X64-NEXT: shll $6, %eax
-; X64-NEXT: leal (%rax,%rdi,2), %eax
+; X64-NEXT: shll $6, %edi
+; X64-NEXT: leal (%rdi,%rax,2), %eax
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%m = mul i8 %x, 66
diff --git a/llvm/test/CodeGen/X86/optimize-max-0.ll b/llvm/test/CodeGen/X86/optimize-max-0.ll
index 283c00e..b6af7e1 100644
--- a/llvm/test/CodeGen/X86/optimize-max-0.ll
+++ b/llvm/test/CodeGen/X86/optimize-max-0.ll
@@ -16,65 +16,65 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: pushl %esi
; CHECK-NEXT: subl $28, %esp
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebp
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: imull %ebp, %ecx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; CHECK-NEXT: movl %edx, %eax
+; CHECK-NEXT: imull %esi, %eax
; CHECK-NEXT: cmpl $1, {{[0-9]+}}(%esp)
-; CHECK-NEXT: movl %ecx, (%esp) ## 4-byte Spill
+; CHECK-NEXT: movl %eax, (%esp) ## 4-byte Spill
; CHECK-NEXT: je LBB0_19
; CHECK-NEXT: ## %bb.1: ## %bb10.preheader
-; CHECK-NEXT: movl %ecx, %eax
-; CHECK-NEXT: sarl $31, %eax
-; CHECK-NEXT: shrl $30, %eax
-; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: sarl $2, %eax
-; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: movl %eax, %ebp
+; CHECK-NEXT: sarl $31, %ebp
+; CHECK-NEXT: shrl $30, %ebp
+; CHECK-NEXT: addl %eax, %ebp
+; CHECK-NEXT: sarl $2, %ebp
+; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: jle LBB0_12
; CHECK-NEXT: ## %bb.2: ## %bb.nph9
-; CHECK-NEXT: testl %ebp, %ebp
+; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: jle LBB0_12
; CHECK-NEXT: ## %bb.3: ## %bb.nph9.split
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: incl %eax
; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: movl %edi, %edx
+; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_4: ## %bb6
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movzbl (%eax,%esi,2), %ebx
-; CHECK-NEXT: movb %bl, (%edx,%esi)
-; CHECK-NEXT: incl %esi
-; CHECK-NEXT: cmpl %ebp, %esi
+; CHECK-NEXT: movzbl (%eax,%edi,2), %ebx
+; CHECK-NEXT: movb %bl, (%edx,%edi)
+; CHECK-NEXT: incl %edi
+; CHECK-NEXT: cmpl %esi, %edi
; CHECK-NEXT: jl LBB0_4
; CHECK-NEXT: ## %bb.5: ## %bb9
; CHECK-NEXT: ## in Loop: Header=BB0_4 Depth=1
; CHECK-NEXT: incl %ecx
; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: addl %ebp, %edx
-; CHECK-NEXT: cmpl %edi, %ecx
+; CHECK-NEXT: addl %esi, %edx
+; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: je LBB0_12
; CHECK-NEXT: ## %bb.6: ## %bb7.preheader
; CHECK-NEXT: ## in Loop: Header=BB0_4 Depth=1
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: jmp LBB0_4
; CHECK-NEXT: LBB0_12: ## %bb18.loopexit
+; CHECK-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: movl (%esp), %eax ## 4-byte Reload
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx ## 4-byte Reload
-; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: addl %ebp, %eax
; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: cmpl $1, %edi
+; CHECK-NEXT: cmpl $1, {{[0-9]+}}(%esp)
; CHECK-NEXT: jle LBB0_13
; CHECK-NEXT: ## %bb.7: ## %bb.nph5
-; CHECK-NEXT: cmpl $2, %ebp
+; CHECK-NEXT: cmpl $2, %esi
; CHECK-NEXT: jl LBB0_13
; CHECK-NEXT: ## %bb.8: ## %bb.nph5.split
-; CHECK-NEXT: movl %ebp, %edx
-; CHECK-NEXT: shrl $31, %edx
-; CHECK-NEXT: addl %ebp, %edx
-; CHECK-NEXT: sarl %edx
+; CHECK-NEXT: movl %esi, %ebp
+; CHECK-NEXT: shrl $31, %ebp
+; CHECK-NEXT: addl %esi, %ebp
+; CHECK-NEXT: sarl %ebp
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: movl %eax, %ecx
; CHECK-NEXT: shrl $31, %ecx
@@ -84,102 +84,103 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax ## 4-byte Reload
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
-; CHECK-NEXT: addl $2, %esi
-; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: movl (%esp), %esi ## 4-byte Reload
-; CHECK-NEXT: addl %esi, %ecx
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl $2, %edx
+; CHECK-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
+; CHECK-NEXT: movl (%esp), %edx ## 4-byte Reload
+; CHECK-NEXT: addl %edx, %ecx
; CHECK-NEXT: xorl %edi, %edi
+; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_9: ## %bb13
; CHECK-NEXT: ## =>This Loop Header: Depth=1
; CHECK-NEXT: ## Child Loop BB0_10 Depth 2
; CHECK-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: addl %esi, %edi
+; CHECK-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
+; CHECK-NEXT: addl %edx, %edi
; CHECK-NEXT: imull {{[0-9]+}}(%esp), %edi
; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi ## 4-byte Folded Reload
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: xorl %ebx, %ebx
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_10: ## %bb14
; CHECK-NEXT: ## Parent Loop BB0_9 Depth=1
; CHECK-NEXT: ## => This Inner Loop Header: Depth=2
-; CHECK-NEXT: movzbl -2(%edi,%esi,4), %ebx
-; CHECK-NEXT: movb %bl, (%ecx,%esi)
-; CHECK-NEXT: movzbl (%edi,%esi,4), %ebx
-; CHECK-NEXT: movb %bl, (%eax,%esi)
-; CHECK-NEXT: incl %esi
-; CHECK-NEXT: cmpl %edx, %esi
+; CHECK-NEXT: movzbl -2(%edi,%ebx,4), %edx
+; CHECK-NEXT: movb %dl, (%ecx,%ebx)
+; CHECK-NEXT: movzbl (%edi,%ebx,4), %edx
+; CHECK-NEXT: movb %dl, (%eax,%ebx)
+; CHECK-NEXT: incl %ebx
+; CHECK-NEXT: cmpl %ebp, %ebx
; CHECK-NEXT: jl LBB0_10
; CHECK-NEXT: ## %bb.11: ## %bb17
; CHECK-NEXT: ## in Loop: Header=BB0_9 Depth=1
; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi ## 4-byte Reload
; CHECK-NEXT: incl %edi
-; CHECK-NEXT: addl %edx, %eax
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi ## 4-byte Reload
-; CHECK-NEXT: addl $2, %esi
-; CHECK-NEXT: addl %edx, %ecx
+; CHECK-NEXT: addl %ebp, %eax
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Reload
+; CHECK-NEXT: addl $2, %edx
+; CHECK-NEXT: addl %ebp, %ecx
; CHECK-NEXT: cmpl {{[-0-9]+}}(%e{{[sb]}}p), %edi ## 4-byte Folded Reload
; CHECK-NEXT: jl LBB0_9
; CHECK-NEXT: LBB0_13: ## %bb20
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: cmpl $1, %eax
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: cmpl $1, %ecx
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ebx
; CHECK-NEXT: je LBB0_19
; CHECK-NEXT: ## %bb.14: ## %bb20
-; CHECK-NEXT: cmpl $3, %eax
+; CHECK-NEXT: cmpl $3, %ecx
; CHECK-NEXT: jne LBB0_24
; CHECK-NEXT: ## %bb.15: ## %bb22
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Reload
-; CHECK-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
-; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp ## 4-byte Reload
+; CHECK-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
+; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: jle LBB0_18
; CHECK-NEXT: ## %bb.16: ## %bb.nph
-; CHECK-NEXT: leal 15(%edi), %eax
+; CHECK-NEXT: leal 15(%edx), %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: imull {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: addl %ebx, %ebx
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT: movl (%esp), %esi ## 4-byte Reload
-; CHECK-NEXT: addl %esi, %ecx
-; CHECK-NEXT: addl %ecx, %ebx
-; CHECK-NEXT: addl %eax, %edx
-; CHECK-NEXT: leal 15(%ebp), %eax
+; CHECK-NEXT: addl %ebp, %ebp
+; CHECK-NEXT: movl (%esp), %ecx ## 4-byte Reload
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
+; CHECK-NEXT: addl %edi, %ecx
+; CHECK-NEXT: addl %ecx, %ebp
+; CHECK-NEXT: addl %eax, %ebx
+; CHECK-NEXT: leal 15(%esi), %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_17: ## %bb23
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: subl $4, %esp
-; CHECK-NEXT: pushl %ebp
-; CHECK-NEXT: pushl %edx
+; CHECK-NEXT: pushl %esi
; CHECK-NEXT: pushl %ebx
-; CHECK-NEXT: movl %ebx, %esi
+; CHECK-NEXT: pushl %ebp
+; CHECK-NEXT: movl %ebp, %edi
+; CHECK-NEXT: movl %ebx, %ebp
; CHECK-NEXT: movl %edx, %ebx
; CHECK-NEXT: calll _memcpy
; CHECK-NEXT: movl %ebx, %edx
-; CHECK-NEXT: movl %esi, %ebx
+; CHECK-NEXT: movl %ebp, %ebx
+; CHECK-NEXT: movl %edi, %ebp
; CHECK-NEXT: addl $16, %esp
-; CHECK-NEXT: addl %ebp, %ebx
-; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Folded Reload
-; CHECK-NEXT: decl %edi
+; CHECK-NEXT: addl %esi, %ebp
+; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
+; CHECK-NEXT: decl %edx
; CHECK-NEXT: jne LBB0_17
; CHECK-NEXT: LBB0_18: ## %bb26
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax ## 4-byte Reload
-; CHECK-NEXT: movl (%esp), %edx ## 4-byte Reload
-; CHECK-NEXT: addl %edx, %eax
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: movl (%esp), %ecx ## 4-byte Reload
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi ## 4-byte Reload
+; CHECK-NEXT: addl %ecx, %esi
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl %esi, %edx
; CHECK-NEXT: jmp LBB0_23
; CHECK-NEXT: LBB0_19: ## %bb29
-; CHECK-NEXT: testl %edi, %edi
+; CHECK-NEXT: testl %edx, %edx
; CHECK-NEXT: jle LBB0_22
; CHECK-NEXT: ## %bb.20: ## %bb.nph11
-; CHECK-NEXT: movl %edi, %esi
-; CHECK-NEXT: leal 15(%ebp), %eax
+; CHECK-NEXT: leal 15(%esi), %eax
; CHECK-NEXT: andl $-16, %eax
; CHECK-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edi
@@ -187,30 +188,32 @@ define void @foo(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: LBB0_21: ## %bb30
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: subl $4, %esp
-; CHECK-NEXT: pushl %ebp
-; CHECK-NEXT: pushl %edx
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: pushl %ebx
; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: movl %ebx, %ebp
; CHECK-NEXT: movl %edx, %ebx
; CHECK-NEXT: calll _memcpy
; CHECK-NEXT: movl %ebx, %edx
+; CHECK-NEXT: movl %ebp, %ebx
; CHECK-NEXT: addl $16, %esp
-; CHECK-NEXT: addl %ebp, %edi
-; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Folded Reload
-; CHECK-NEXT: decl %esi
+; CHECK-NEXT: addl %esi, %edi
+; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
+; CHECK-NEXT: decl %edx
; CHECK-NEXT: jne LBB0_21
; CHECK-NEXT: LBB0_22: ## %bb33
-; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT: movl (%esp), %edx ## 4-byte Reload
-; CHECK-NEXT: addl %edx, %ecx
+; CHECK-NEXT: movl (%esp), %ecx ## 4-byte Reload
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl %ecx, %edx
; CHECK-NEXT: LBB0_23: ## %bb33
-; CHECK-NEXT: movl %edx, %eax
+; CHECK-NEXT: movl %ecx, %eax
; CHECK-NEXT: shrl $31, %eax
-; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: addl %ecx, %eax
; CHECK-NEXT: sarl %eax
; CHECK-NEXT: subl $4, %esp
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: pushl $128
-; CHECK-NEXT: pushl %ecx
+; CHECK-NEXT: pushl %edx
; CHECK-NEXT: calll _memset
; CHECK-NEXT: addl $44, %esp
; CHECK-NEXT: LBB0_25: ## %return
@@ -523,38 +526,38 @@ define void @bar(ptr %r, i32 %s, i32 %w, i32 %x, ptr %j, i32 %d) nounwind {
; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Reload
; CHECK-NEXT: addl %edx, %eax
; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: xorl %ebx, %ebx
+; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB1_9: ## %bb13
; CHECK-NEXT: ## =>This Loop Header: Depth=1
; CHECK-NEXT: ## Child Loop BB1_10 Depth 2
-; CHECK-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
-; CHECK-NEXT: andl $1, %ebx
; CHECK-NEXT: movl %edx, (%esp) ## 4-byte Spill
-; CHECK-NEXT: addl %edx, %ebx
-; CHECK-NEXT: imull {{[0-9]+}}(%esp), %ebx
-; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
+; CHECK-NEXT: andl $1, %edx
+; CHECK-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Spill
+; CHECK-NEXT: addl %esi, %edx
+; CHECK-NEXT: imull {{[0-9]+}}(%esp), %edx
+; CHECK-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Folded Reload
; CHECK-NEXT: xorl %esi, %esi
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB1_10: ## %bb14
; CHECK-NEXT: ## Parent Loop BB1_9 Depth=1
; CHECK-NEXT: ## => This Inner Loop Header: Depth=2
-; CHECK-NEXT: movzbl -2(%ebx,%esi,4), %edx
-; CHECK-NEXT: movb %dl, (%eax,%esi)
-; CHECK-NEXT: movzbl (%ebx,%esi,4), %edx
-; CHECK-NEXT: movb %dl, (%ecx,%esi)
+; CHECK-NEXT: movzbl -2(%edx,%esi,4), %ebx
+; CHECK-NEXT: movb %bl, (%eax,%esi)
+; CHECK-NEXT: movzbl (%edx,%esi,4), %ebx
+; CHECK-NEXT: movb %bl, (%ecx,%esi)
; CHECK-NEXT: incl %esi
; CHECK-NEXT: cmpl %ebp, %esi
; CHECK-NEXT: jb LBB1_10
; CHECK-NEXT: ## %bb.11: ## %bb17
; CHECK-NEXT: ## in Loop: Header=BB1_9 Depth=1
-; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Reload
-; CHECK-NEXT: incl %ebx
-; CHECK-NEXT: addl %ebp, %ecx
; CHECK-NEXT: movl (%esp), %edx ## 4-byte Reload
-; CHECK-NEXT: addl $2, %edx
+; CHECK-NEXT: incl %edx
+; CHECK-NEXT: addl %ebp, %ecx
+; CHECK-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi ## 4-byte Reload
+; CHECK-NEXT: addl $2, %esi
; CHECK-NEXT: addl %ebp, %eax
-; CHECK-NEXT: cmpl {{[-0-9]+}}(%e{{[sb]}}p), %ebx ## 4-byte Folded Reload
+; CHECK-NEXT: cmpl {{[-0-9]+}}(%e{{[sb]}}p), %edx ## 4-byte Folded Reload
; CHECK-NEXT: jb LBB1_9
; CHECK-NEXT: LBB1_13: ## %bb20
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %esi
diff --git a/llvm/test/CodeGen/X86/parity.ll b/llvm/test/CodeGen/X86/parity.ll
index 420f5ba..31a7f11 100644
--- a/llvm/test/CodeGen/X86/parity.ll
+++ b/llvm/test/CodeGen/X86/parity.ll
@@ -219,12 +219,12 @@ define i64 @parity_64(i64 %x) {
;
; X64-NOPOPCNT-LABEL: parity_64:
; X64-NOPOPCNT: # %bb.0:
-; X64-NOPOPCNT-NEXT: movq %rdi, %rax
-; X64-NOPOPCNT-NEXT: shrq $32, %rax
-; X64-NOPOPCNT-NEXT: xorl %edi, %eax
-; X64-NOPOPCNT-NEXT: movl %eax, %ecx
+; X64-NOPOPCNT-NEXT: movl %edi, %eax
+; X64-NOPOPCNT-NEXT: shrq $32, %rdi
+; X64-NOPOPCNT-NEXT: xorl %eax, %edi
+; X64-NOPOPCNT-NEXT: movl %edi, %ecx
; X64-NOPOPCNT-NEXT: shrl $16, %ecx
-; X64-NOPOPCNT-NEXT: xorl %eax, %ecx
+; X64-NOPOPCNT-NEXT: xorl %edi, %ecx
; X64-NOPOPCNT-NEXT: xorl %eax, %eax
; X64-NOPOPCNT-NEXT: xorb %ch, %cl
; X64-NOPOPCNT-NEXT: setnp %al
@@ -264,12 +264,12 @@ define i32 @parity_64_trunc(i64 %x) {
;
; X64-NOPOPCNT-LABEL: parity_64_trunc:
; X64-NOPOPCNT: # %bb.0:
-; X64-NOPOPCNT-NEXT: movq %rdi, %rax
-; X64-NOPOPCNT-NEXT: shrq $32, %rax
-; X64-NOPOPCNT-NEXT: xorl %edi, %eax
-; X64-NOPOPCNT-NEXT: movl %eax, %ecx
+; X64-NOPOPCNT-NEXT: movl %edi, %eax
+; X64-NOPOPCNT-NEXT: shrq $32, %rdi
+; X64-NOPOPCNT-NEXT: xorl %eax, %edi
+; X64-NOPOPCNT-NEXT: movl %edi, %ecx
; X64-NOPOPCNT-NEXT: shrl $16, %ecx
-; X64-NOPOPCNT-NEXT: xorl %eax, %ecx
+; X64-NOPOPCNT-NEXT: xorl %edi, %ecx
; X64-NOPOPCNT-NEXT: xorl %eax, %eax
; X64-NOPOPCNT-NEXT: xorb %ch, %cl
; X64-NOPOPCNT-NEXT: setnp %al
@@ -628,12 +628,12 @@ define i64 @parity_64_shift(i64 %0) {
;
; X64-NOPOPCNT-LABEL: parity_64_shift:
; X64-NOPOPCNT: # %bb.0:
-; X64-NOPOPCNT-NEXT: movq %rdi, %rax
-; X64-NOPOPCNT-NEXT: shrq $32, %rax
-; X64-NOPOPCNT-NEXT: xorl %edi, %eax
-; X64-NOPOPCNT-NEXT: movl %eax, %ecx
+; X64-NOPOPCNT-NEXT: movl %edi, %eax
+; X64-NOPOPCNT-NEXT: shrq $32, %rdi
+; X64-NOPOPCNT-NEXT: xorl %eax, %edi
+; X64-NOPOPCNT-NEXT: movl %edi, %ecx
; X64-NOPOPCNT-NEXT: shrl $16, %ecx
-; X64-NOPOPCNT-NEXT: xorl %eax, %ecx
+; X64-NOPOPCNT-NEXT: xorl %edi, %ecx
; X64-NOPOPCNT-NEXT: xorl %eax, %eax
; X64-NOPOPCNT-NEXT: xorb %ch, %cl
; X64-NOPOPCNT-NEXT: setnp %al
diff --git a/llvm/test/CodeGen/X86/pr166534.ll b/llvm/test/CodeGen/X86/pr166534.ll
index aef44cc..162a0c9 100644
--- a/llvm/test/CodeGen/X86/pr166534.ll
+++ b/llvm/test/CodeGen/X86/pr166534.ll
@@ -7,100 +7,64 @@
define void @pr166534(ptr %pa, ptr %pb, ptr %pc, ptr %pd) {
; SSE2-LABEL: pr166534:
; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq (%rdi), %rax
-; SSE2-NEXT: movq 8(%rdi), %r8
; SSE2-NEXT: movdqu (%rdi), %xmm0
-; SSE2-NEXT: movq (%rsi), %r9
-; SSE2-NEXT: movq 8(%rsi), %rdi
; SSE2-NEXT: movdqu (%rsi), %xmm1
; SSE2-NEXT: pcmpeqb %xmm0, %xmm1
; SSE2-NEXT: pmovmskb %xmm1, %esi
-; SSE2-NEXT: xorl %r10d, %r10d
+; SSE2-NEXT: xorl %eax, %eax
; SSE2-NEXT: cmpl $65535, %esi # imm = 0xFFFF
-; SSE2-NEXT: sete %r10b
-; SSE2-NEXT: orq %r10, (%rdx)
+; SSE2-NEXT: sete %al
+; SSE2-NEXT: orq %rax, (%rdx)
; SSE2-NEXT: cmpl $65535, %esi # imm = 0xFFFF
; SSE2-NEXT: jne .LBB0_2
; SSE2-NEXT: # %bb.1: # %if.then
-; SSE2-NEXT: xorq %r9, %rax
-; SSE2-NEXT: xorq %rdi, %r8
-; SSE2-NEXT: xorl %edx, %edx
-; SSE2-NEXT: orq %rax, %r8
-; SSE2-NEXT: sete %dl
-; SSE2-NEXT: orq %rdx, (%rcx)
+; SSE2-NEXT: orq %rax, (%rcx)
; SSE2-NEXT: .LBB0_2: # %if.end
; SSE2-NEXT: retq
;
; SSE4-LABEL: pr166534:
; SSE4: # %bb.0: # %entry
-; SSE4-NEXT: movq (%rdi), %rax
-; SSE4-NEXT: movq 8(%rdi), %r8
; SSE4-NEXT: movdqu (%rdi), %xmm0
-; SSE4-NEXT: movq (%rsi), %r9
-; SSE4-NEXT: movq 8(%rsi), %rdi
; SSE4-NEXT: movdqu (%rsi), %xmm1
; SSE4-NEXT: pxor %xmm0, %xmm1
-; SSE4-NEXT: xorl %esi, %esi
+; SSE4-NEXT: xorl %eax, %eax
; SSE4-NEXT: ptest %xmm1, %xmm1
-; SSE4-NEXT: sete %sil
-; SSE4-NEXT: orq %rsi, (%rdx)
+; SSE4-NEXT: sete %al
+; SSE4-NEXT: orq %rax, (%rdx)
; SSE4-NEXT: ptest %xmm1, %xmm1
; SSE4-NEXT: jne .LBB0_2
; SSE4-NEXT: # %bb.1: # %if.then
-; SSE4-NEXT: xorq %r9, %rax
-; SSE4-NEXT: xorq %rdi, %r8
-; SSE4-NEXT: xorl %edx, %edx
-; SSE4-NEXT: orq %rax, %r8
-; SSE4-NEXT: sete %dl
-; SSE4-NEXT: orq %rdx, (%rcx)
+; SSE4-NEXT: orq %rax, (%rcx)
; SSE4-NEXT: .LBB0_2: # %if.end
; SSE4-NEXT: retq
;
; AVX2-LABEL: pr166534:
; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: movq (%rdi), %rax
-; AVX2-NEXT: movq 8(%rdi), %r8
; AVX2-NEXT: vmovdqu (%rdi), %xmm0
-; AVX2-NEXT: movq (%rsi), %rdi
; AVX2-NEXT: vpxor (%rsi), %xmm0, %xmm0
-; AVX2-NEXT: movq 8(%rsi), %rsi
-; AVX2-NEXT: xorl %r9d, %r9d
+; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: vptest %xmm0, %xmm0
-; AVX2-NEXT: sete %r9b
-; AVX2-NEXT: orq %r9, (%rdx)
+; AVX2-NEXT: sete %al
+; AVX2-NEXT: orq %rax, (%rdx)
; AVX2-NEXT: vptest %xmm0, %xmm0
; AVX2-NEXT: jne .LBB0_2
; AVX2-NEXT: # %bb.1: # %if.then
-; AVX2-NEXT: xorq %rdi, %rax
-; AVX2-NEXT: xorq %rsi, %r8
-; AVX2-NEXT: xorl %edx, %edx
-; AVX2-NEXT: orq %rax, %r8
-; AVX2-NEXT: sete %dl
-; AVX2-NEXT: orq %rdx, (%rcx)
+; AVX2-NEXT: orq %rax, (%rcx)
; AVX2-NEXT: .LBB0_2: # %if.end
; AVX2-NEXT: retq
;
; AVX512-LABEL: pr166534:
; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: movq (%rdi), %rax
-; AVX512-NEXT: movq 8(%rdi), %r8
; AVX512-NEXT: vmovdqu (%rdi), %xmm0
-; AVX512-NEXT: movq (%rsi), %r9
-; AVX512-NEXT: movq 8(%rsi), %rdi
; AVX512-NEXT: vpxor (%rsi), %xmm0, %xmm0
-; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: xorl %eax, %eax
; AVX512-NEXT: vptest %xmm0, %xmm0
-; AVX512-NEXT: sete %sil
-; AVX512-NEXT: orq %rsi, (%rdx)
+; AVX512-NEXT: sete %al
+; AVX512-NEXT: orq %rax, (%rdx)
; AVX512-NEXT: vptest %xmm0, %xmm0
; AVX512-NEXT: jne .LBB0_2
; AVX512-NEXT: # %bb.1: # %if.then
-; AVX512-NEXT: xorq %r9, %rax
-; AVX512-NEXT: xorq %rdi, %r8
-; AVX512-NEXT: xorl %edx, %edx
-; AVX512-NEXT: orq %rax, %r8
-; AVX512-NEXT: sete %dl
-; AVX512-NEXT: orq %rdx, (%rcx)
+; AVX512-NEXT: orq %rax, (%rcx)
; AVX512-NEXT: .LBB0_2: # %if.end
; AVX512-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/pr166744.ll b/llvm/test/CodeGen/X86/pr166744.ll
index 21b25d8..ffdb68c 100644
--- a/llvm/test/CodeGen/X86/pr166744.ll
+++ b/llvm/test/CodeGen/X86/pr166744.ll
@@ -31,13 +31,13 @@ define i1 @PR166744(ptr %v, i64 %idx, i1 zeroext %b) {
; NOPOSTRA-LABEL: PR166744:
; NOPOSTRA: # %bb.0:
; NOPOSTRA-NEXT: movl %esi, %eax
-; NOPOSTRA-NEXT: shrl $3, %eax
-; NOPOSTRA-NEXT: andl $60, %eax
-; NOPOSTRA-NEXT: movl (%rdi,%rax), %ecx
-; NOPOSTRA-NEXT: btrl %esi, %ecx
-; NOPOSTRA-NEXT: shlxl %esi, %edx, %edx
-; NOPOSTRA-NEXT: orl %ecx, %edx
-; NOPOSTRA-NEXT: movl %edx, (%rdi,%rax)
+; NOPOSTRA-NEXT: shrl $3, %esi
+; NOPOSTRA-NEXT: andl $60, %esi
+; NOPOSTRA-NEXT: movl (%rdi,%rsi), %ecx
+; NOPOSTRA-NEXT: btrl %eax, %ecx
+; NOPOSTRA-NEXT: shlxl %eax, %edx, %eax
+; NOPOSTRA-NEXT: orl %ecx, %eax
+; NOPOSTRA-NEXT: movl %eax, (%rdi,%rsi)
; NOPOSTRA-NEXT: movq 16(%rdi), %rax
; NOPOSTRA-NEXT: movq (%rdi), %rcx
; NOPOSTRA-NEXT: movq 8(%rdi), %rdx
diff --git a/llvm/test/CodeGen/X86/rotate-extract.ll b/llvm/test/CodeGen/X86/rotate-extract.ll
index 8f046a4..26e6886 100644
--- a/llvm/test/CodeGen/X86/rotate-extract.ll
+++ b/llvm/test/CodeGen/X86/rotate-extract.ll
@@ -203,10 +203,10 @@ define i16 @no_extract_mul(i16 %i) nounwind {
; X64-LABEL: no_extract_mul:
; X64: # %bb.0:
; X64-NEXT: # kill: def $edi killed $edi def $rdi
-; X64-NEXT: leal (%rdi,%rdi,8), %eax
-; X64-NEXT: # kill: def $edi killed $edi killed $rdi def $rdi
+; X64-NEXT: movl %edi, %eax
; X64-NEXT: shll $8, %edi
; X64-NEXT: leal (%rdi,%rdi,8), %ecx
+; X64-NEXT: leal (%rax,%rax,8), %eax
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: shrl $9, %eax
; X64-NEXT: orl %ecx, %eax
diff --git a/llvm/test/CodeGen/X86/smul_fix.ll b/llvm/test/CodeGen/X86/smul_fix.ll
index ce56283..8cb0327 100644
--- a/llvm/test/CodeGen/X86/smul_fix.ll
+++ b/llvm/test/CodeGen/X86/smul_fix.ll
@@ -10,10 +10,10 @@ declare <4 x i32> @llvm.smul.fix.v4i32(<4 x i32>, <4 x i32>, i32)
define i32 @func(i32 %x, i32 %y) nounwind {
; X64-LABEL: func:
; X64: # %bb.0:
-; X64-NEXT: movslq %esi, %rax
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq %rax, %rcx
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movslq %esi, %rcx
+; X64-NEXT: movslq %edi, %rax
+; X64-NEXT: imulq %rcx, %rax
+; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shrq $32, %rax
; X64-NEXT: shldl $30, %ecx, %eax
; X64-NEXT: # kill: def $eax killed $eax killed $rax
diff --git a/llvm/test/CodeGen/X86/sshl_sat.ll b/llvm/test/CodeGen/X86/sshl_sat.ll
index e5ea911..a93be22 100644
--- a/llvm/test/CodeGen/X86/sshl_sat.ll
+++ b/llvm/test/CodeGen/X86/sshl_sat.ll
@@ -15,16 +15,16 @@ define i16 @func(i16 %x, i16 %y) nounwind {
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movl %edi, %edx
-; X64-NEXT: shll %cl, %edx
-; X64-NEXT: movswl %dx, %esi
+; X64-NEXT: shll %cl, %edi
+; X64-NEXT: movswl %di, %esi
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarl %cl, %esi
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: testw %di, %di
+; X64-NEXT: testw %dx, %dx
; X64-NEXT: sets %al
; X64-NEXT: addl $32767, %eax # imm = 0x7FFF
-; X64-NEXT: cmpw %si, %di
-; X64-NEXT: cmovel %edx, %eax
+; X64-NEXT: cmpw %si, %dx
+; X64-NEXT: cmovel %edi, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
@@ -33,17 +33,17 @@ define i16 @func(i16 %x, i16 %y) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movswl %si, %edi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: movswl %dx, %edi
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: testw %dx, %dx
+; X86-NEXT: testw %si, %si
; X86-NEXT: sets %al
; X86-NEXT: addl $32767, %eax # imm = 0x7FFF
-; X86-NEXT: cmpw %di, %dx
-; X86-NEXT: cmovel %esi, %eax
+; X86-NEXT: cmpw %di, %si
+; X86-NEXT: cmovel %edx, %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
@@ -58,18 +58,18 @@ define i16 @func2(i8 %x, i8 %y) nounwind {
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movsbl %dil, %eax
; X64-NEXT: addl %eax, %eax
-; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: movl %eax, %edx
+; X64-NEXT: xorl %esi, %esi
; X64-NEXT: testw %ax, %ax
-; X64-NEXT: sets %dl
-; X64-NEXT: addl $32767, %edx # imm = 0x7FFF
-; X64-NEXT: movl %eax, %esi
-; X64-NEXT: shll %cl, %esi
-; X64-NEXT: movswl %si, %edi
+; X64-NEXT: sets %sil
+; X64-NEXT: addl $32767, %esi # imm = 0x7FFF
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: movswl %ax, %edi
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarl %cl, %edi
-; X64-NEXT: cmpw %di, %ax
-; X64-NEXT: cmovnel %edx, %esi
-; X64-NEXT: movswl %si, %eax
+; X64-NEXT: cmpw %di, %dx
+; X64-NEXT: cmovnel %esi, %eax
+; X64-NEXT: cwtl
; X64-NEXT: shrl %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/sshl_sat_vec.ll b/llvm/test/CodeGen/X86/sshl_sat_vec.ll
index 10dee14..ff76707 100644
--- a/llvm/test/CodeGen/X86/sshl_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/sshl_sat_vec.ll
@@ -365,119 +365,118 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %edi, %ebx
-; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: movswl %bx, %ebp
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll %cl, %edi
+; X86-NEXT: movswl %di, %ebp
; X86-NEXT: sarl %cl, %ebp
; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: testw %di, %di
+; X86-NEXT: testw %bx, %bx
; X86-NEXT: sets %cl
; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
-; X86-NEXT: cmpw %bp, %di
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: cmovel %ebx, %ecx
+; X86-NEXT: cmpw %bp, %bx
+; X86-NEXT: movl %esi, %ebx
+; X86-NEXT: cmovel %edi, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %esi, %edi
-; X86-NEXT: movl %eax, %ecx
-; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movswl %di, %ebx
-; X86-NEXT: sarl %cl, %ebx
-; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: testw %si, %si
-; X86-NEXT: sets %al
-; X86-NEXT: addl $32767, %eax # imm = 0x7FFF
-; X86-NEXT: cmpw %bx, %si
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmovel %edi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %edx, %esi
; X86-NEXT: shll %cl, %esi
; X86-NEXT: movswl %si, %edi
; X86-NEXT: sarl %cl, %edi
-; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: testw %dx, %dx
-; X86-NEXT: sets %al
-; X86-NEXT: addl $32767, %eax # imm = 0x7FFF
-; X86-NEXT: cmpw %di, %dx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: testw %bx, %bx
+; X86-NEXT: sets %cl
+; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
+; X86-NEXT: movl %ecx, %ebp
+; X86-NEXT: cmpw %di, %bx
+; X86-NEXT: movl %edx, %edi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmovel %esi, %eax
-; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movl %eax, %edx
+; X86-NEXT: cmovel %esi, %ebp
; X86-NEXT: shll %cl, %edx
; X86-NEXT: movswl %dx, %esi
; X86-NEXT: sarl %cl, %esi
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %di, %di
; X86-NEXT: sets %bl
; X86-NEXT: addl $32767, %ebx # imm = 0x7FFF
-; X86-NEXT: cmpw %si, %ax
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpw %si, %di
+; X86-NEXT: movl %eax, %esi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmovel %edx, %ebx
-; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
-; X86-NEXT: movl %eax, %edx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movswl %ax, %edx
+; X86-NEXT: sarl %cl, %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: testw %si, %si
+; X86-NEXT: sets %cl
+; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
+; X86-NEXT: cmpw %dx, %si
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %esi
+; X86-NEXT: cmovel %eax, %ecx
+; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: shll %cl, %edx
-; X86-NEXT: movswl %dx, %esi
-; X86-NEXT: sarl %cl, %esi
+; X86-NEXT: movswl %dx, %eax
+; X86-NEXT: sarl %cl, %eax
; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %si, %si
; X86-NEXT: sets %cl
; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
-; X86-NEXT: cmpw %si, %ax
+; X86-NEXT: cmpw %ax, %si
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmovel %edx, %ecx
-; X86-NEXT: movl %ecx, %ebp
+; X86-NEXT: movl %ecx, (%esp) # 4-byte Spill
; X86-NEXT: movl %eax, %edx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: shll %cl, %edx
-; X86-NEXT: movswl %dx, %esi
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movswl %ax, %esi
; X86-NEXT: sarl %cl, %esi
; X86-NEXT: xorl %ebx, %ebx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %dx, %dx
; X86-NEXT: sets %bl
; X86-NEXT: addl $32767, %ebx # imm = 0x7FFF
-; X86-NEXT: cmpw %si, %ax
-; X86-NEXT: cmovel %edx, %ebx
+; X86-NEXT: cmpw %si, %dx
+; X86-NEXT: cmovel %eax, %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %esi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movswl %si, %edi
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movswl %ax, %edi
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: xorl %edx, %edx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %si, %si
; X86-NEXT: sets %dl
; X86-NEXT: addl $32767, %edx # imm = 0x7FFF
-; X86-NEXT: cmpw %di, %ax
-; X86-NEXT: cmovel %esi, %edx
+; X86-NEXT: cmpw %di, %si
+; X86-NEXT: cmovel %eax, %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %esi
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movswl %si, %edi
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: movswl %ax, %edi
; X86-NEXT: sarl %cl, %edi
; X86-NEXT: xorl %ecx, %ecx
-; X86-NEXT: testw %ax, %ax
+; X86-NEXT: testw %si, %si
; X86-NEXT: sets %cl
; X86-NEXT: addl $32767, %ecx # imm = 0x7FFF
-; X86-NEXT: cmpw %di, %ax
-; X86-NEXT: cmovel %esi, %ecx
+; X86-NEXT: cmpw %di, %si
+; X86-NEXT: cmovel %eax, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movw %cx, 14(%eax)
; X86-NEXT: movw %dx, 12(%eax)
; X86-NEXT: movw %bx, 10(%eax)
-; X86-NEXT: movw %bp, 8(%eax)
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
+; X86-NEXT: movw %cx, 8(%eax)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movw %cx, 6(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movw %cx, 4(%eax)
-; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-NEXT: movw %cx, 2(%eax)
+; X86-NEXT: movw %bp, 2(%eax)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movw %cx, (%eax)
; X86-NEXT: addl $16, %esp
diff --git a/llvm/test/CodeGen/X86/stackmap.ll b/llvm/test/CodeGen/X86/stackmap.ll
index 72406aa..9bf88cb 100644
--- a/llvm/test/CodeGen/X86/stackmap.ll
+++ b/llvm/test/CodeGen/X86/stackmap.ll
@@ -1,7 +1,10 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -terminal-rule=0 | FileCheck %s
;
; Note: Print verbose stackmaps using -debug-only=stackmaps.
+; FIXME: This test should be fixed to produce the correct-sized spill with the
+; -terminal-rule=0 flag removed.
+
; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
; Header
@@ -546,8 +549,8 @@ define void @clobberScratch(i32 %a) {
ret void
}
-; A stack frame which needs to be realigned at runtime (to meet alignment
-; criteria for values on the stack) does not have a fixed frame size.
+; A stack frame which needs to be realigned at runtime (to meet alignment
+; criteria for values on the stack) does not have a fixed frame size.
; CHECK-LABEL: .long L{{.*}}-_needsStackRealignment
; CHECK-NEXT: .short 0
; 0 locations
diff --git a/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll b/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
index 5bd624c..01fbafb 100644
--- a/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
+++ b/llvm/test/CodeGen/X86/subvectorwise-store-of-vector-splat.ll
@@ -2429,126 +2429,126 @@ define void @vec384_v3i8(ptr %in.subvec.ptr, ptr %out.subvec.ptr, ptr %out.vec.p
; SSE2-ONLY: # %bb.0:
; SSE2-ONLY-NEXT: movl (%rdi), %eax
; SSE2-ONLY-NEXT: notl %eax
-; SSE2-ONLY-NEXT: movw %ax, (%rsi)
; SSE2-ONLY-NEXT: movl %eax, %ecx
-; SSE2-ONLY-NEXT: shrl $16, %ecx
-; SSE2-ONLY-NEXT: movb %cl, 2(%rsi)
-; SSE2-ONLY-NEXT: movb %cl, 2(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, (%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 6(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 4(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 10(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 8(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 14(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 12(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 18(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 16(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 22(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 20(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 26(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 24(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 30(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 28(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 34(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 32(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 38(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 36(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 42(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 40(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 46(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 44(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 50(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 48(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 54(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 52(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 58(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 56(%rdx)
-; SSE2-ONLY-NEXT: movb %cl, 62(%rdx)
-; SSE2-ONLY-NEXT: movw %ax, 60(%rdx)
+; SSE2-ONLY-NEXT: movw %ax, (%rsi)
+; SSE2-ONLY-NEXT: shrl $16, %eax
+; SSE2-ONLY-NEXT: movb %al, 2(%rsi)
+; SSE2-ONLY-NEXT: movb %al, 2(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, (%rdx)
+; SSE2-ONLY-NEXT: movb %al, 6(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 4(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 10(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 8(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 14(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 12(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 18(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 16(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 22(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 20(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 26(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 24(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 30(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 28(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 34(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 32(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 38(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 36(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 42(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 40(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 46(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 44(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 50(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 48(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 54(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 52(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 58(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 56(%rdx)
+; SSE2-ONLY-NEXT: movb %al, 62(%rdx)
+; SSE2-ONLY-NEXT: movw %cx, 60(%rdx)
; SSE2-ONLY-NEXT: retq
;
; SSE3-LABEL: vec384_v3i8:
; SSE3: # %bb.0:
; SSE3-NEXT: movl (%rdi), %eax
; SSE3-NEXT: notl %eax
-; SSE3-NEXT: movw %ax, (%rsi)
; SSE3-NEXT: movl %eax, %ecx
-; SSE3-NEXT: shrl $16, %ecx
-; SSE3-NEXT: movb %cl, 2(%rsi)
-; SSE3-NEXT: movb %cl, 2(%rdx)
-; SSE3-NEXT: movw %ax, (%rdx)
-; SSE3-NEXT: movb %cl, 6(%rdx)
-; SSE3-NEXT: movw %ax, 4(%rdx)
-; SSE3-NEXT: movb %cl, 10(%rdx)
-; SSE3-NEXT: movw %ax, 8(%rdx)
-; SSE3-NEXT: movb %cl, 14(%rdx)
-; SSE3-NEXT: movw %ax, 12(%rdx)
-; SSE3-NEXT: movb %cl, 18(%rdx)
-; SSE3-NEXT: movw %ax, 16(%rdx)
-; SSE3-NEXT: movb %cl, 22(%rdx)
-; SSE3-NEXT: movw %ax, 20(%rdx)
-; SSE3-NEXT: movb %cl, 26(%rdx)
-; SSE3-NEXT: movw %ax, 24(%rdx)
-; SSE3-NEXT: movb %cl, 30(%rdx)
-; SSE3-NEXT: movw %ax, 28(%rdx)
-; SSE3-NEXT: movb %cl, 34(%rdx)
-; SSE3-NEXT: movw %ax, 32(%rdx)
-; SSE3-NEXT: movb %cl, 38(%rdx)
-; SSE3-NEXT: movw %ax, 36(%rdx)
-; SSE3-NEXT: movb %cl, 42(%rdx)
-; SSE3-NEXT: movw %ax, 40(%rdx)
-; SSE3-NEXT: movb %cl, 46(%rdx)
-; SSE3-NEXT: movw %ax, 44(%rdx)
-; SSE3-NEXT: movb %cl, 50(%rdx)
-; SSE3-NEXT: movw %ax, 48(%rdx)
-; SSE3-NEXT: movb %cl, 54(%rdx)
-; SSE3-NEXT: movw %ax, 52(%rdx)
-; SSE3-NEXT: movb %cl, 58(%rdx)
-; SSE3-NEXT: movw %ax, 56(%rdx)
-; SSE3-NEXT: movb %cl, 62(%rdx)
-; SSE3-NEXT: movw %ax, 60(%rdx)
+; SSE3-NEXT: movw %ax, (%rsi)
+; SSE3-NEXT: shrl $16, %eax
+; SSE3-NEXT: movb %al, 2(%rsi)
+; SSE3-NEXT: movb %al, 2(%rdx)
+; SSE3-NEXT: movw %cx, (%rdx)
+; SSE3-NEXT: movb %al, 6(%rdx)
+; SSE3-NEXT: movw %cx, 4(%rdx)
+; SSE3-NEXT: movb %al, 10(%rdx)
+; SSE3-NEXT: movw %cx, 8(%rdx)
+; SSE3-NEXT: movb %al, 14(%rdx)
+; SSE3-NEXT: movw %cx, 12(%rdx)
+; SSE3-NEXT: movb %al, 18(%rdx)
+; SSE3-NEXT: movw %cx, 16(%rdx)
+; SSE3-NEXT: movb %al, 22(%rdx)
+; SSE3-NEXT: movw %cx, 20(%rdx)
+; SSE3-NEXT: movb %al, 26(%rdx)
+; SSE3-NEXT: movw %cx, 24(%rdx)
+; SSE3-NEXT: movb %al, 30(%rdx)
+; SSE3-NEXT: movw %cx, 28(%rdx)
+; SSE3-NEXT: movb %al, 34(%rdx)
+; SSE3-NEXT: movw %cx, 32(%rdx)
+; SSE3-NEXT: movb %al, 38(%rdx)
+; SSE3-NEXT: movw %cx, 36(%rdx)
+; SSE3-NEXT: movb %al, 42(%rdx)
+; SSE3-NEXT: movw %cx, 40(%rdx)
+; SSE3-NEXT: movb %al, 46(%rdx)
+; SSE3-NEXT: movw %cx, 44(%rdx)
+; SSE3-NEXT: movb %al, 50(%rdx)
+; SSE3-NEXT: movw %cx, 48(%rdx)
+; SSE3-NEXT: movb %al, 54(%rdx)
+; SSE3-NEXT: movw %cx, 52(%rdx)
+; SSE3-NEXT: movb %al, 58(%rdx)
+; SSE3-NEXT: movw %cx, 56(%rdx)
+; SSE3-NEXT: movb %al, 62(%rdx)
+; SSE3-NEXT: movw %cx, 60(%rdx)
; SSE3-NEXT: retq
;
; SSSE3-ONLY-LABEL: vec384_v3i8:
; SSSE3-ONLY: # %bb.0:
; SSSE3-ONLY-NEXT: movl (%rdi), %eax
; SSSE3-ONLY-NEXT: notl %eax
-; SSSE3-ONLY-NEXT: movw %ax, (%rsi)
; SSSE3-ONLY-NEXT: movl %eax, %ecx
-; SSSE3-ONLY-NEXT: shrl $16, %ecx
-; SSSE3-ONLY-NEXT: movb %cl, 2(%rsi)
-; SSSE3-ONLY-NEXT: movb %cl, 2(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, (%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 6(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 4(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 10(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 8(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 14(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 12(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 18(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 16(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 22(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 20(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 26(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 24(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 30(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 28(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 34(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 32(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 38(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 36(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 42(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 40(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 46(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 44(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 50(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 48(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 54(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 52(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 58(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 56(%rdx)
-; SSSE3-ONLY-NEXT: movb %cl, 62(%rdx)
-; SSSE3-ONLY-NEXT: movw %ax, 60(%rdx)
+; SSSE3-ONLY-NEXT: movw %ax, (%rsi)
+; SSSE3-ONLY-NEXT: shrl $16, %eax
+; SSSE3-ONLY-NEXT: movb %al, 2(%rsi)
+; SSSE3-ONLY-NEXT: movb %al, 2(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, (%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 6(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 4(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 10(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 8(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 14(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 12(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 18(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 16(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 22(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 20(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 26(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 24(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 30(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 28(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 34(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 32(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 38(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 36(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 42(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 40(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 46(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 44(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 50(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 48(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 54(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 52(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 58(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 56(%rdx)
+; SSSE3-ONLY-NEXT: movb %al, 62(%rdx)
+; SSSE3-ONLY-NEXT: movw %cx, 60(%rdx)
; SSSE3-ONLY-NEXT: retq
;
; SSE41-LABEL: vec384_v3i8:
diff --git a/llvm/test/CodeGen/X86/twoaddr-lea.ll b/llvm/test/CodeGen/X86/twoaddr-lea.ll
index f20b777..3ad3e9a 100644
--- a/llvm/test/CodeGen/X86/twoaddr-lea.ll
+++ b/llvm/test/CodeGen/X86/twoaddr-lea.ll
@@ -65,10 +65,10 @@ entry:
define void @ham() {
; CHECK-LABEL: ham:
; CHECK: ## %bb.0: ## %bb
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: movq _global@GOTPCREL(%rip), %rdx
; CHECK-NEXT: movq _global2@GOTPCREL(%rip), %rsi
-; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %cl, %cl
; CHECK-NEXT: je LBB3_2
; CHECK-NEXT: .p2align 4
diff --git a/llvm/test/CodeGen/X86/umul_fix.ll b/llvm/test/CodeGen/X86/umul_fix.ll
index eacc714..5a68484 100644
--- a/llvm/test/CodeGen/X86/umul_fix.ll
+++ b/llvm/test/CodeGen/X86/umul_fix.ll
@@ -10,10 +10,10 @@ declare <4 x i32> @llvm.umul.fix.v4i32(<4 x i32>, <4 x i32>, i32)
define i32 @func(i32 %x, i32 %y) nounwind {
; X64-LABEL: func:
; X64: # %bb.0:
-; X64-NEXT: movl %esi, %eax
-; X64-NEXT: movl %edi, %ecx
-; X64-NEXT: imulq %rax, %rcx
-; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: movl %esi, %ecx
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: imulq %rcx, %rax
+; X64-NEXT: movl %eax, %ecx
; X64-NEXT: shrq $32, %rax
; X64-NEXT: shldl $30, %ecx, %eax
; X64-NEXT: # kill: def $eax killed $eax killed $rax
diff --git a/llvm/test/CodeGen/X86/ushl_sat.ll b/llvm/test/CodeGen/X86/ushl_sat.ll
index e0e1ef7..9768e47 100644
--- a/llvm/test/CodeGen/X86/ushl_sat.ll
+++ b/llvm/test/CodeGen/X86/ushl_sat.ll
@@ -14,23 +14,23 @@ define i16 @func(i16 %x, i16 %y) nounwind {
; X64-LABEL: func:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: movl %edi, %edx
-; X64-NEXT: shll %cl, %edx
-; X64-NEXT: movzwl %dx, %eax
+; X64-NEXT: movl %edi, %eax
+; X64-NEXT: shll %cl, %edi
+; X64-NEXT: movzwl %di, %edx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
-; X64-NEXT: shrl %cl, %eax
-; X64-NEXT: cmpw %ax, %di
+; X64-NEXT: shrl %cl, %edx
+; X64-NEXT: cmpw %dx, %ax
; X64-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X64-NEXT: cmovel %edx, %eax
+; X64-NEXT: cmovel %edi, %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X86-LABEL: func:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
-; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %edx, %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %eax, %edx
; X86-NEXT: shll %cl, %edx
; X86-NEXT: movzwl %dx, %esi
; X86-NEXT: shrl %cl, %esi
@@ -51,14 +51,14 @@ define i16 @func2(i8 %x, i8 %y) nounwind {
; X64-NEXT: movsbl %dil, %eax
; X64-NEXT: addl %eax, %eax
; X64-NEXT: movl %eax, %edx
-; X64-NEXT: shll %cl, %edx
-; X64-NEXT: movzwl %dx, %esi
+; X64-NEXT: shll %cl, %eax
+; X64-NEXT: movzwl %ax, %esi
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shrl %cl, %esi
-; X64-NEXT: cmpw %si, %ax
-; X64-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X64-NEXT: cmovel %edx, %eax
-; X64-NEXT: cwtl
+; X64-NEXT: cmpw %si, %dx
+; X64-NEXT: movl $65535, %ecx # imm = 0xFFFF
+; X64-NEXT: cmovel %eax, %ecx
+; X64-NEXT: movswl %cx, %eax
; X64-NEXT: shrl %eax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/ushl_sat_vec.ll b/llvm/test/CodeGen/X86/ushl_sat_vec.ll
index b8e83da..762088c 100644
--- a/llvm/test/CodeGen/X86/ushl_sat_vec.ll
+++ b/llvm/test/CodeGen/X86/ushl_sat_vec.ll
@@ -300,95 +300,94 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
-; X86-NEXT: subl $12, %esp
+; X86-NEXT: subl $16, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %ebp, %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %eax, %edx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %eax, %ebx
-; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: movzwl %bx, %edi
-; X86-NEXT: shrl %cl, %edi
-; X86-NEXT: cmpw %di, %ax
-; X86-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X86-NEXT: cmovnel %eax, %ebx
-; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %esi, %eax
-; X86-NEXT: movl %edx, %ecx
; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movzwl %ax, %edi
-; X86-NEXT: shrl %cl, %edi
-; X86-NEXT: cmpw %di, %si
+; X86-NEXT: movzwl %ax, %esi
+; X86-NEXT: shrl %cl, %esi
+; X86-NEXT: cmpw %si, %dx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $65535, %esi # imm = 0xFFFF
-; X86-NEXT: cmovnel %esi, %eax
+; X86-NEXT: movl $65535, %edx # imm = 0xFFFF
+; X86-NEXT: cmovnel %edx, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-NEXT: movl %ebp, %eax
-; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movzwl %ax, %edx
-; X86-NEXT: shrl %cl, %edx
-; X86-NEXT: cmpw %dx, %bp
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmovnel %esi, %eax
-; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NEXT: movl %esi, %ebp
; X86-NEXT: shll %cl, %ebp
-; X86-NEXT: movzwl %bp, %edx
-; X86-NEXT: shrl %cl, %edx
-; X86-NEXT: cmpw %dx, %si
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzwl %bp, %eax
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: cmpw %ax, %di
+; X86-NEXT: movl %ebx, %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmovnel %eax, %ebp
-; X86-NEXT: movl %edx, %ebx
+; X86-NEXT: cmovnel %edx, %ebp
+; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shll %cl, %ebx
-; X86-NEXT: movzwl %bx, %esi
-; X86-NEXT: shrl %cl, %esi
-; X86-NEXT: cmpw %si, %dx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movzwl %bx, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: cmpw %dx, %ax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $65535, %esi # imm = 0xFFFF
; X86-NEXT: cmovnel %esi, %ebx
+; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-NEXT: shll %cl, %edi
+; X86-NEXT: movzwl %di, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: cmpw %dx, %ax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
+; X86-NEXT: movl %ebp, %eax
+; X86-NEXT: cmovnel %esi, %edi
+; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl %edx, %edi
+; X86-NEXT: shll %cl, %ebp
+; X86-NEXT: movzwl %bp, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: cmpw %dx, %ax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: cmovnel %esi, %ebp
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %edi, %eax
; X86-NEXT: shll %cl, %edi
-; X86-NEXT: movzwl %di, %eax
-; X86-NEXT: shrl %cl, %eax
-; X86-NEXT: cmpw %ax, %dx
+; X86-NEXT: movzwl %di, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: cmpw %dx, %ax
; X86-NEXT: cmovnel %esi, %edi
+; X86-NEXT: movl $65535, %ebx # imm = 0xFFFF
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edx, %esi
-; X86-NEXT: shll %cl, %esi
-; X86-NEXT: movzwl %si, %eax
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: movzwl %dx, %eax
; X86-NEXT: shrl %cl, %eax
-; X86-NEXT: cmpw %ax, %dx
-; X86-NEXT: movl $65535, %eax # imm = 0xFFFF
-; X86-NEXT: cmovnel %eax, %esi
+; X86-NEXT: cmpw %ax, %si
+; X86-NEXT: cmovnel %ebx, %edx
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ebx
; X86-NEXT: shll %cl, %eax
-; X86-NEXT: movzwl %ax, %edx
-; X86-NEXT: shrl %cl, %edx
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: cmpw %dx, %cx
+; X86-NEXT: movzwl %ax, %esi
+; X86-NEXT: shrl %cl, %esi
+; X86-NEXT: cmpw %si, %bx
; X86-NEXT: movl $65535, %ecx # imm = 0xFFFF
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movw %ax, 14(%ecx)
-; X86-NEXT: movw %si, 12(%ecx)
+; X86-NEXT: movw %dx, 12(%ecx)
; X86-NEXT: movw %di, 10(%ecx)
-; X86-NEXT: movw %bx, 8(%ecx)
-; X86-NEXT: movw %bp, 6(%ecx)
+; X86-NEXT: movw %bp, 8(%ecx)
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-NEXT: movw %ax, 6(%ecx)
+; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movw %ax, 4(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movw %ax, 2(%ecx)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movw %ax, (%ecx)
; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: addl $12, %esp
+; X86-NEXT: addl $16, %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll b/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
index b233855..324fe12 100644
--- a/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
+++ b/llvm/test/CodeGen/X86/vector-mulfix-legalize.ll
@@ -85,14 +85,14 @@ define <4 x i16> @smulfixsat(<4 x i16> %a) {
; CHECK-NEXT: movswl %dx, %edx
; CHECK-NEXT: leal (,%rdx,4), %esi
; CHECK-NEXT: movl %esi, %edi
-; CHECK-NEXT: shrl $16, %edi
-; CHECK-NEXT: shldw $1, %si, %di
+; CHECK-NEXT: shrl $16, %esi
+; CHECK-NEXT: shldw $1, %di, %si
; CHECK-NEXT: sarl $14, %edx
; CHECK-NEXT: cmpl $16384, %edx # imm = 0x4000
-; CHECK-NEXT: cmovgel %eax, %edi
+; CHECK-NEXT: cmovgel %eax, %esi
; CHECK-NEXT: cmpl $-16384, %edx # imm = 0xC000
-; CHECK-NEXT: cmovll %ecx, %edi
-; CHECK-NEXT: pinsrw $3, %edi, %xmm1
+; CHECK-NEXT: cmovll %ecx, %esi
+; CHECK-NEXT: pinsrw $3, %esi, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%t = call <4 x i16> @llvm.smul.fix.sat.v4i16(<4 x i16> <i16 1, i16 2, i16 3, i16 4>, <4 x i16> %a, i32 15)
@@ -106,19 +106,19 @@ define <4 x i16> @umulfixsat(<4 x i16> %a) {
; CHECK-NEXT: pextrw $2, %xmm0, %eax
; CHECK-NEXT: leal (%rax,%rax,2), %eax
; CHECK-NEXT: movl %eax, %edx
-; CHECK-NEXT: shrl $16, %edx
-; CHECK-NEXT: movl %edx, %ecx
-; CHECK-NEXT: shldw $1, %ax, %cx
-; CHECK-NEXT: cmpl $32768, %edx # imm = 0x8000
+; CHECK-NEXT: shrl $16, %eax
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: shldw $1, %dx, %cx
+; CHECK-NEXT: cmpl $32768, %eax # imm = 0x8000
; CHECK-NEXT: movl $65535, %eax # imm = 0xFFFF
; CHECK-NEXT: cmovael %eax, %ecx
; CHECK-NEXT: pextrw $1, %xmm0, %edx
; CHECK-NEXT: addl %edx, %edx
; CHECK-NEXT: movl %edx, %esi
-; CHECK-NEXT: shrl $16, %esi
-; CHECK-NEXT: movl %esi, %edi
-; CHECK-NEXT: shldw $1, %dx, %di
-; CHECK-NEXT: cmpl $32768, %esi # imm = 0x8000
+; CHECK-NEXT: shrl $16, %edx
+; CHECK-NEXT: movl %edx, %edi
+; CHECK-NEXT: shldw $1, %si, %di
+; CHECK-NEXT: cmpl $32768, %edx # imm = 0x8000
; CHECK-NEXT: cmovael %eax, %edi
; CHECK-NEXT: movd %xmm0, %edx
; CHECK-NEXT: xorl %esi, %esi
@@ -133,10 +133,10 @@ define <4 x i16> @umulfixsat(<4 x i16> %a) {
; CHECK-NEXT: pextrw $3, %xmm0, %ecx
; CHECK-NEXT: shll $2, %ecx
; CHECK-NEXT: movl %ecx, %edx
-; CHECK-NEXT: shrl $16, %edx
-; CHECK-NEXT: movl %edx, %esi
-; CHECK-NEXT: shldw $1, %cx, %si
-; CHECK-NEXT: cmpl $32768, %edx # imm = 0x8000
+; CHECK-NEXT: shrl $16, %ecx
+; CHECK-NEXT: movl %ecx, %esi
+; CHECK-NEXT: shldw $1, %dx, %si
+; CHECK-NEXT: cmpl $32768, %ecx # imm = 0x8000
; CHECK-NEXT: cmovael %eax, %esi
; CHECK-NEXT: pinsrw $3, %esi, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll b/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
index 320dce8..6cb4323 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-xor-bool.ll
@@ -397,8 +397,8 @@ define i1 @trunc_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -409,8 +409,8 @@ define i1 @trunc_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512BW-NEXT: vpmovw2m %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -421,8 +421,8 @@ define i1 @trunc_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512VL-NEXT: vpmovw2m %ymm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -722,8 +722,8 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -734,8 +734,8 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -746,8 +746,8 @@ define i1 @trunc_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512VL-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -974,13 +974,13 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) nounwind {
; AVX512BW-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
-; AVX512BW-NEXT: movq %rax, %rcx
-; AVX512BW-NEXT: shrq $32, %rcx
-; AVX512BW-NEXT: xorl %eax, %ecx
-; AVX512BW-NEXT: movl %ecx, %eax
-; AVX512BW-NEXT: shrl $16, %eax
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrq $32, %rax
; AVX512BW-NEXT: xorl %ecx, %eax
-; AVX512BW-NEXT: xorb %ah, %al
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrl $16, %ecx
+; AVX512BW-NEXT: xorl %eax, %ecx
+; AVX512BW-NEXT: xorb %ch, %cl
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -990,13 +990,13 @@ define i1 @trunc_v64i8_v64i1(<64 x i8>) nounwind {
; AVX512VL-NEXT: vpsllw $7, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovb2m %zmm0, %k0
; AVX512VL-NEXT: kmovq %k0, %rax
-; AVX512VL-NEXT: movq %rax, %rcx
-; AVX512VL-NEXT: shrq $32, %rcx
-; AVX512VL-NEXT: xorl %eax, %ecx
-; AVX512VL-NEXT: movl %ecx, %eax
-; AVX512VL-NEXT: shrl $16, %eax
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrq $32, %rax
; AVX512VL-NEXT: xorl %ecx, %eax
-; AVX512VL-NEXT: xorb %ah, %al
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrl $16, %ecx
+; AVX512VL-NEXT: xorl %eax, %ecx
+; AVX512VL-NEXT: xorb %ch, %cl
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -1211,8 +1211,8 @@ define i1 @icmp0_v16i8_v16i1(<16 x i8>) nounwind {
; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1222,8 +1222,8 @@ define i1 @icmp0_v16i8_v16i1(<16 x i8>) nounwind {
; AVX512VL-NEXT: vptestnmb %xmm0, %xmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: retq
%a = icmp eq <16 x i8> %0, zeroinitializer
@@ -1427,8 +1427,8 @@ define i1 @icmp0_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -1439,8 +1439,8 @@ define i1 @icmp0_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512BW-NEXT: vptestnmw %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1450,8 +1450,8 @@ define i1 @icmp0_v16i16_v16i1(<16 x i16>) nounwind {
; AVX512VL-NEXT: vptestnmw %ymm0, %ymm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -1756,8 +1756,8 @@ define i1 @icmp0_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512F-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -1767,8 +1767,8 @@ define i1 @icmp0_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -1778,8 +1778,8 @@ define i1 @icmp0_v16i32_v16i1(<16 x i32>) nounwind {
; AVX512VL-NEXT: vptestnmd %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -2010,13 +2010,13 @@ define i1 @icmp0_v64i8_v64i1(<64 x i8>) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
-; AVX512BW-NEXT: movq %rax, %rcx
-; AVX512BW-NEXT: shrq $32, %rcx
-; AVX512BW-NEXT: xorl %eax, %ecx
-; AVX512BW-NEXT: movl %ecx, %eax
-; AVX512BW-NEXT: shrl $16, %eax
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrq $32, %rax
; AVX512BW-NEXT: xorl %ecx, %eax
-; AVX512BW-NEXT: xorb %ah, %al
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrl $16, %ecx
+; AVX512BW-NEXT: xorl %eax, %ecx
+; AVX512BW-NEXT: xorb %ch, %cl
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -2025,13 +2025,13 @@ define i1 @icmp0_v64i8_v64i1(<64 x i8>) nounwind {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vptestnmb %zmm0, %zmm0, %k0
; AVX512VL-NEXT: kmovq %k0, %rax
-; AVX512VL-NEXT: movq %rax, %rcx
-; AVX512VL-NEXT: shrq $32, %rcx
-; AVX512VL-NEXT: xorl %eax, %ecx
-; AVX512VL-NEXT: movl %ecx, %eax
-; AVX512VL-NEXT: shrl $16, %eax
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrq $32, %rax
; AVX512VL-NEXT: xorl %ecx, %eax
-; AVX512VL-NEXT: xorb %ah, %al
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrl $16, %ecx
+; AVX512VL-NEXT: xorl %eax, %ecx
+; AVX512VL-NEXT: xorb %ch, %cl
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -2240,8 +2240,8 @@ define i1 @icmp_v16i8_v16i1(<16 x i8>, <16 x i8>) nounwind {
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -2251,8 +2251,8 @@ define i1 @icmp_v16i8_v16i1(<16 x i8>, <16 x i8>) nounwind {
; AVX512VL-NEXT: vpcmpeqb %xmm1, %xmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: retq
%a = icmp eq <16 x i8> %0, %1
@@ -2504,8 +2504,8 @@ define i1 @icmp_v16i16_v16i1(<16 x i16>, <16 x i16>) nounwind {
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -2517,8 +2517,8 @@ define i1 @icmp_v16i16_v16i1(<16 x i16>, <16 x i16>) nounwind {
; AVX512BW-NEXT: vpcmpeqw %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -2528,8 +2528,8 @@ define i1 @icmp_v16i16_v16i1(<16 x i16>, <16 x i16>) nounwind {
; AVX512VL-NEXT: vpcmpeqw %ymm1, %ymm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -2845,8 +2845,8 @@ define i1 @icmp_v16i32_v16i1(<16 x i32>, <16 x i32>) nounwind {
; AVX512F-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: movl %eax, %ecx
-; AVX512F-NEXT: shrl $8, %ecx
-; AVX512F-NEXT: xorb %al, %cl
+; AVX512F-NEXT: shrl $8, %eax
+; AVX512F-NEXT: xorb %cl, %al
; AVX512F-NEXT: setnp %al
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
@@ -2856,8 +2856,8 @@ define i1 @icmp_v16i32_v16i1(<16 x i32>, <16 x i32>) nounwind {
; AVX512BW-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: movl %eax, %ecx
-; AVX512BW-NEXT: shrl $8, %ecx
-; AVX512BW-NEXT: xorb %al, %cl
+; AVX512BW-NEXT: shrl $8, %eax
+; AVX512BW-NEXT: xorb %cl, %al
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -2867,8 +2867,8 @@ define i1 @icmp_v16i32_v16i1(<16 x i32>, <16 x i32>) nounwind {
; AVX512VL-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; AVX512VL-NEXT: kmovd %k0, %eax
; AVX512VL-NEXT: movl %eax, %ecx
-; AVX512VL-NEXT: shrl $8, %ecx
-; AVX512VL-NEXT: xorb %al, %cl
+; AVX512VL-NEXT: shrl $8, %eax
+; AVX512VL-NEXT: xorb %cl, %al
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
@@ -3097,13 +3097,13 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>, <64 x i8>) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512BW-NEXT: kmovq %k0, %rax
-; AVX512BW-NEXT: movq %rax, %rcx
-; AVX512BW-NEXT: shrq $32, %rcx
-; AVX512BW-NEXT: xorl %eax, %ecx
-; AVX512BW-NEXT: movl %ecx, %eax
-; AVX512BW-NEXT: shrl $16, %eax
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrq $32, %rax
; AVX512BW-NEXT: xorl %ecx, %eax
-; AVX512BW-NEXT: xorb %ah, %al
+; AVX512BW-NEXT: movl %eax, %ecx
+; AVX512BW-NEXT: shrl $16, %ecx
+; AVX512BW-NEXT: xorl %eax, %ecx
+; AVX512BW-NEXT: xorb %ch, %cl
; AVX512BW-NEXT: setnp %al
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -3112,13 +3112,13 @@ define i1 @icmp_v64i8_v64i1(<64 x i8>, <64 x i8>) nounwind {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpcmpeqb %zmm1, %zmm0, %k0
; AVX512VL-NEXT: kmovq %k0, %rax
-; AVX512VL-NEXT: movq %rax, %rcx
-; AVX512VL-NEXT: shrq $32, %rcx
-; AVX512VL-NEXT: xorl %eax, %ecx
-; AVX512VL-NEXT: movl %ecx, %eax
-; AVX512VL-NEXT: shrl $16, %eax
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrq $32, %rax
; AVX512VL-NEXT: xorl %ecx, %eax
-; AVX512VL-NEXT: xorb %ah, %al
+; AVX512VL-NEXT: movl %eax, %ecx
+; AVX512VL-NEXT: shrl $16, %ecx
+; AVX512VL-NEXT: xorl %eax, %ecx
+; AVX512VL-NEXT: xorb %ch, %cl
; AVX512VL-NEXT: setnp %al
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll b/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
index 3c98eba6..1c3d27f 100644
--- a/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
+++ b/llvm/test/CodeGen/X86/wide-scalar-shift-by-byte-multiple-legalization.ll
@@ -777,31 +777,31 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edx, (%esp)
+; FALLBACK18-NEXT: movl %eax, %ecx
; FALLBACK18-NEXT: andb $12, %bl
-; FALLBACK18-NEXT: movzbl %bl, %esi
-; FALLBACK18-NEXT: movl 4(%esp,%esi), %edi
-; FALLBACK18-NEXT: movl 8(%esp,%esi), %ebx
-; FALLBACK18-NEXT: shrxl %eax, %edi, %ebp
-; FALLBACK18-NEXT: movl %eax, %edx
-; FALLBACK18-NEXT: notb %dl
-; FALLBACK18-NEXT: leal (%ebx,%ebx), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK18-NEXT: orl %ebp, %ecx
-; FALLBACK18-NEXT: shrxl %eax, (%esp,%esi), %ebp
-; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK18-NEXT: orl %ebp, %edi
-; FALLBACK18-NEXT: shrxl %eax, %ebx, %ebx
-; FALLBACK18-NEXT: movl 12(%esp,%esi), %esi
-; FALLBACK18-NEXT: shrxl %eax, %esi, %eax
-; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %edx
-; FALLBACK18-NEXT: orl %ebx, %edx
+; FALLBACK18-NEXT: movzbl %bl, %edi
+; FALLBACK18-NEXT: movl 4(%esp,%edi), %ebx
+; FALLBACK18-NEXT: movl 8(%esp,%edi), %esi
+; FALLBACK18-NEXT: shrxl %ecx, %ebx, %ebp
+; FALLBACK18-NEXT: notb %al
+; FALLBACK18-NEXT: leal (%esi,%esi), %edx
+; FALLBACK18-NEXT: shlxl %eax, %edx, %edx
+; FALLBACK18-NEXT: orl %ebp, %edx
+; FALLBACK18-NEXT: shrxl %ecx, (%esp,%edi), %ebp
+; FALLBACK18-NEXT: addl %ebx, %ebx
+; FALLBACK18-NEXT: shlxl %eax, %ebx, %ebx
+; FALLBACK18-NEXT: orl %ebp, %ebx
+; FALLBACK18-NEXT: movl 12(%esp,%edi), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %ebp
+; FALLBACK18-NEXT: shlxl %eax, %ebp, %eax
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK18-NEXT: orl %esi, %eax
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ecx
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK18-NEXT: movl %eax, 12(%esi)
-; FALLBACK18-NEXT: movl %edx, 8(%esi)
-; FALLBACK18-NEXT: movl %edi, (%esi)
-; FALLBACK18-NEXT: movl %ecx, 4(%esi)
+; FALLBACK18-NEXT: movl %ecx, 12(%esi)
+; FALLBACK18-NEXT: movl %eax, 8(%esi)
+; FALLBACK18-NEXT: movl %ebx, (%esi)
+; FALLBACK18-NEXT: movl %edx, 4(%esi)
; FALLBACK18-NEXT: addl $44, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -962,42 +962,43 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: pushl %ebx
; FALLBACK22-NEXT: pushl %edi
; FALLBACK22-NEXT: pushl %esi
-; FALLBACK22-NEXT: subl $44, %esp
+; FALLBACK22-NEXT: subl $60, %esp
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK22-NEXT: movups (%ecx), %xmm0
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %eax
; FALLBACK22-NEXT: shlb $3, %al
; FALLBACK22-NEXT: xorps %xmm1, %xmm1
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movaps %xmm0, (%esp)
-; FALLBACK22-NEXT: andb $12, %cl
-; FALLBACK22-NEXT: movzbl %cl, %edi
-; FALLBACK22-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movl %eax, %ecx
-; FALLBACK22-NEXT: notb %cl
-; FALLBACK22-NEXT: movl 4(%esp,%edi), %ebp
-; FALLBACK22-NEXT: movl 8(%esp,%edi), %esi
-; FALLBACK22-NEXT: leal (%ebp,%ebp), %edx
-; FALLBACK22-NEXT: shlxl %ecx, %edx, %edx
-; FALLBACK22-NEXT: orl %ebx, %edx
-; FALLBACK22-NEXT: shrxl %eax, %esi, %ebx
-; FALLBACK22-NEXT: shrxl %eax, %ebp, %ebp
-; FALLBACK22-NEXT: movl 12(%esp,%edi), %edi
-; FALLBACK22-NEXT: shrxl %eax, %edi, %eax
-; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK22-NEXT: orl %ebx, %edi
-; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ecx, %esi, %ecx
-; FALLBACK22-NEXT: orl %ebp, %ecx
+; FALLBACK22-NEXT: andb $12, %dl
+; FALLBACK22-NEXT: movzbl %dl, %edi
+; FALLBACK22-NEXT: shrxl %ecx, 16(%esp,%edi), %ebp
+; FALLBACK22-NEXT: notb %al
+; FALLBACK22-NEXT: movl 20(%esp,%edi), %edx
+; FALLBACK22-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK22-NEXT: addl %edx, %edx
+; FALLBACK22-NEXT: shlxl %eax, %edx, %edx
+; FALLBACK22-NEXT: orl %ebp, %edx
+; FALLBACK22-NEXT: movl 28(%esp,%edi), %ebp
+; FALLBACK22-NEXT: leal (%ebp,%ebp), %edi
+; FALLBACK22-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK22-NEXT: shrxl %ecx, %ebx, %esi
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: addl %ebx, %ebx
+; FALLBACK22-NEXT: shlxl %eax, %ebx, %eax
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: shrxl %ecx, %ebp, %ecx
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK22-NEXT: movl %eax, 12(%esi)
-; FALLBACK22-NEXT: movl %ecx, 4(%esi)
+; FALLBACK22-NEXT: movl %ecx, 12(%esi)
+; FALLBACK22-NEXT: movl %eax, 4(%esi)
; FALLBACK22-NEXT: movl %edi, 8(%esi)
; FALLBACK22-NEXT: movl %edx, (%esi)
-; FALLBACK22-NEXT: addl $44, %esp
+; FALLBACK22-NEXT: addl $60, %esp
; FALLBACK22-NEXT: popl %esi
; FALLBACK22-NEXT: popl %edi
; FALLBACK22-NEXT: popl %ebx
@@ -1152,42 +1153,43 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: pushl %ebx
; FALLBACK26-NEXT: pushl %edi
; FALLBACK26-NEXT: pushl %esi
-; FALLBACK26-NEXT: subl $44, %esp
+; FALLBACK26-NEXT: subl $60, %esp
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %eax
; FALLBACK26-NEXT: shlb $3, %al
; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK26-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: vmovaps %xmm0, (%esp)
-; FALLBACK26-NEXT: andb $12, %cl
-; FALLBACK26-NEXT: movzbl %cl, %edi
-; FALLBACK26-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK26-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: movl %eax, %ecx
-; FALLBACK26-NEXT: notb %cl
-; FALLBACK26-NEXT: movl 4(%esp,%edi), %ebp
-; FALLBACK26-NEXT: movl 8(%esp,%edi), %esi
-; FALLBACK26-NEXT: leal (%ebp,%ebp), %edx
-; FALLBACK26-NEXT: shlxl %ecx, %edx, %edx
-; FALLBACK26-NEXT: orl %ebx, %edx
-; FALLBACK26-NEXT: shrxl %eax, %esi, %ebx
-; FALLBACK26-NEXT: shrxl %eax, %ebp, %ebp
-; FALLBACK26-NEXT: movl 12(%esp,%edi), %edi
-; FALLBACK26-NEXT: shrxl %eax, %edi, %eax
-; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK26-NEXT: orl %ebx, %edi
-; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ecx, %esi, %ecx
-; FALLBACK26-NEXT: orl %ebp, %ecx
+; FALLBACK26-NEXT: andb $12, %dl
+; FALLBACK26-NEXT: movzbl %dl, %edi
+; FALLBACK26-NEXT: shrxl %ecx, 16(%esp,%edi), %ebp
+; FALLBACK26-NEXT: notb %al
+; FALLBACK26-NEXT: movl 20(%esp,%edi), %edx
+; FALLBACK26-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK26-NEXT: addl %edx, %edx
+; FALLBACK26-NEXT: shlxl %eax, %edx, %edx
+; FALLBACK26-NEXT: orl %ebp, %edx
+; FALLBACK26-NEXT: movl 28(%esp,%edi), %ebp
+; FALLBACK26-NEXT: leal (%ebp,%ebp), %edi
+; FALLBACK26-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK26-NEXT: shrxl %ecx, %ebx, %esi
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: addl %ebx, %ebx
+; FALLBACK26-NEXT: shlxl %eax, %ebx, %eax
+; FALLBACK26-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: shrxl %ecx, %ebp, %ecx
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK26-NEXT: movl %eax, 12(%esi)
-; FALLBACK26-NEXT: movl %ecx, 4(%esi)
+; FALLBACK26-NEXT: movl %ecx, 12(%esi)
+; FALLBACK26-NEXT: movl %eax, 4(%esi)
; FALLBACK26-NEXT: movl %edi, 8(%esi)
; FALLBACK26-NEXT: movl %edx, (%esi)
-; FALLBACK26-NEXT: addl $44, %esp
+; FALLBACK26-NEXT: addl $60, %esp
; FALLBACK26-NEXT: popl %esi
; FALLBACK26-NEXT: popl %edi
; FALLBACK26-NEXT: popl %ebx
@@ -1342,42 +1344,43 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: pushl %ebx
; FALLBACK30-NEXT: pushl %edi
; FALLBACK30-NEXT: pushl %esi
-; FALLBACK30-NEXT: subl $44, %esp
+; FALLBACK30-NEXT: subl $60, %esp
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %eax
; FALLBACK30-NEXT: shlb $3, %al
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovaps %xmm1, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: vmovaps %xmm0, (%esp)
-; FALLBACK30-NEXT: andb $12, %cl
-; FALLBACK30-NEXT: movzbl %cl, %edi
-; FALLBACK30-NEXT: shrxl %eax, (%esp,%edi), %ebx
+; FALLBACK30-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: movl %eax, %ecx
-; FALLBACK30-NEXT: notb %cl
-; FALLBACK30-NEXT: movl 4(%esp,%edi), %ebp
-; FALLBACK30-NEXT: movl 8(%esp,%edi), %esi
-; FALLBACK30-NEXT: leal (%ebp,%ebp), %edx
-; FALLBACK30-NEXT: shlxl %ecx, %edx, %edx
-; FALLBACK30-NEXT: orl %ebx, %edx
-; FALLBACK30-NEXT: shrxl %eax, %esi, %ebx
-; FALLBACK30-NEXT: shrxl %eax, %ebp, %ebp
-; FALLBACK30-NEXT: movl 12(%esp,%edi), %edi
-; FALLBACK30-NEXT: shrxl %eax, %edi, %eax
-; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK30-NEXT: orl %ebx, %edi
-; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ecx, %esi, %ecx
-; FALLBACK30-NEXT: orl %ebp, %ecx
+; FALLBACK30-NEXT: andb $12, %dl
+; FALLBACK30-NEXT: movzbl %dl, %edi
+; FALLBACK30-NEXT: shrxl %ecx, 16(%esp,%edi), %ebp
+; FALLBACK30-NEXT: notb %al
+; FALLBACK30-NEXT: movl 20(%esp,%edi), %edx
+; FALLBACK30-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 24(%esp,%edi), %ebx
+; FALLBACK30-NEXT: addl %edx, %edx
+; FALLBACK30-NEXT: shlxl %eax, %edx, %edx
+; FALLBACK30-NEXT: orl %ebp, %edx
+; FALLBACK30-NEXT: movl 28(%esp,%edi), %ebp
+; FALLBACK30-NEXT: leal (%ebp,%ebp), %edi
+; FALLBACK30-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK30-NEXT: shrxl %ecx, %ebx, %esi
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: addl %ebx, %ebx
+; FALLBACK30-NEXT: shlxl %eax, %ebx, %eax
+; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: shrxl %ecx, %ebp, %ecx
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK30-NEXT: movl %eax, 12(%esi)
-; FALLBACK30-NEXT: movl %ecx, 4(%esi)
+; FALLBACK30-NEXT: movl %ecx, 12(%esi)
+; FALLBACK30-NEXT: movl %eax, 4(%esi)
; FALLBACK30-NEXT: movl %edi, 8(%esi)
; FALLBACK30-NEXT: movl %edx, (%esi)
-; FALLBACK30-NEXT: addl $44, %esp
+; FALLBACK30-NEXT: addl $60, %esp
; FALLBACK30-NEXT: popl %esi
; FALLBACK30-NEXT: popl %edi
; FALLBACK30-NEXT: popl %ebx
@@ -1784,41 +1787,41 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl 4(%ecx), %esi
; FALLBACK18-NEXT: movl 8(%ecx), %edi
; FALLBACK18-NEXT: movl 12(%ecx), %ecx
-; FALLBACK18-NEXT: movzbl (%eax), %eax
-; FALLBACK18-NEXT: movl %eax, %ebx
-; FALLBACK18-NEXT: shlb $3, %bl
+; FALLBACK18-NEXT: movzbl (%eax), %ebx
+; FALLBACK18-NEXT: movl %ebx, %eax
+; FALLBACK18-NEXT: shlb $3, %al
; FALLBACK18-NEXT: xorps %xmm0, %xmm0
; FALLBACK18-NEXT: movaps %xmm0, (%esp)
; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: andb $12, %al
-; FALLBACK18-NEXT: negb %al
-; FALLBACK18-NEXT: movsbl %al, %edx
-; FALLBACK18-NEXT: movl 16(%esp,%edx), %edi
-; FALLBACK18-NEXT: movl 20(%esp,%edx), %ecx
-; FALLBACK18-NEXT: shlxl %ebx, %ecx, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %ebp
-; FALLBACK18-NEXT: movl %ebx, %eax
+; FALLBACK18-NEXT: movl %eax, %ecx
+; FALLBACK18-NEXT: andb $12, %bl
+; FALLBACK18-NEXT: negb %bl
+; FALLBACK18-NEXT: movsbl %bl, %esi
+; FALLBACK18-NEXT: movl 16(%esp,%esi), %ebx
+; FALLBACK18-NEXT: movl 20(%esp,%esi), %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edi
; FALLBACK18-NEXT: notb %al
-; FALLBACK18-NEXT: shrl %edi
-; FALLBACK18-NEXT: shrxl %eax, %edi, %edi
-; FALLBACK18-NEXT: orl %esi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, 28(%esp,%edx), %esi
-; FALLBACK18-NEXT: movl 24(%esp,%edx), %edx
-; FALLBACK18-NEXT: shlxl %ebx, %edx, %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ebp
+; FALLBACK18-NEXT: shrl %ebx
+; FALLBACK18-NEXT: shrxl %eax, %ebx, %ebx
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: shlxl %ecx, 28(%esp,%esi), %edi
+; FALLBACK18-NEXT: movl 24(%esp,%esi), %esi
+; FALLBACK18-NEXT: shlxl %ecx, %esi, %ecx
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %eax, %esi, %esi
+; FALLBACK18-NEXT: orl %edi, %esi
; FALLBACK18-NEXT: shrl %edx
-; FALLBACK18-NEXT: shrxl %eax, %edx, %edx
-; FALLBACK18-NEXT: orl %esi, %edx
-; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %eax, %ecx, %eax
-; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: shrxl %eax, %edx, %eax
+; FALLBACK18-NEXT: orl %ecx, %eax
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK18-NEXT: movl %ebp, (%ecx)
; FALLBACK18-NEXT: movl %eax, 8(%ecx)
-; FALLBACK18-NEXT: movl %edx, 12(%ecx)
-; FALLBACK18-NEXT: movl %edi, 4(%ecx)
+; FALLBACK18-NEXT: movl %esi, 12(%ecx)
+; FALLBACK18-NEXT: movl %ebx, 4(%ecx)
; FALLBACK18-NEXT: addl $44, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -1983,39 +1986,39 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK22-NEXT: movups (%ecx), %xmm0
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %eax
; FALLBACK22-NEXT: shlb $3, %al
; FALLBACK22-NEXT: xorps %xmm1, %xmm1
; FALLBACK22-NEXT: movaps %xmm1, (%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: andb $12, %cl
-; FALLBACK22-NEXT: negb %cl
-; FALLBACK22-NEXT: movsbl %cl, %ecx
-; FALLBACK22-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
-; FALLBACK22-NEXT: movl 24(%esp,%ecx), %edx
-; FALLBACK22-NEXT: shlxl %eax, %edx, %edi
-; FALLBACK22-NEXT: movl %eax, %ebx
-; FALLBACK22-NEXT: notb %bl
-; FALLBACK22-NEXT: shrl %edx
-; FALLBACK22-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK22-NEXT: orl %esi, %edx
-; FALLBACK22-NEXT: movl 20(%esp,%ecx), %esi
-; FALLBACK22-NEXT: movl %esi, %ebp
+; FALLBACK22-NEXT: movl %eax, %ecx
+; FALLBACK22-NEXT: andb $12, %dl
+; FALLBACK22-NEXT: negb %dl
+; FALLBACK22-NEXT: movsbl %dl, %edx
+; FALLBACK22-NEXT: shlxl %ecx, 28(%esp,%edx), %edi
+; FALLBACK22-NEXT: notb %al
+; FALLBACK22-NEXT: movl 24(%esp,%edx), %esi
+; FALLBACK22-NEXT: shlxl %ecx, %esi, %ebx
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %eax, %esi, %esi
+; FALLBACK22-NEXT: orl %edi, %esi
+; FALLBACK22-NEXT: movl 20(%esp,%edx), %edi
+; FALLBACK22-NEXT: movl %edi, %ebp
; FALLBACK22-NEXT: shrl %ebp
-; FALLBACK22-NEXT: shrxl %ebx, %ebp, %ebp
-; FALLBACK22-NEXT: orl %edi, %ebp
-; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK22-NEXT: movl 16(%esp,%ecx), %ecx
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %eax
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %esi, %ecx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK22-NEXT: movl %eax, (%esi)
-; FALLBACK22-NEXT: movl %ecx, 4(%esi)
-; FALLBACK22-NEXT: movl %ebp, 8(%esi)
-; FALLBACK22-NEXT: movl %edx, 12(%esi)
+; FALLBACK22-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK22-NEXT: orl %ebx, %ebp
+; FALLBACK22-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: movl 16(%esp,%edx), %edx
+; FALLBACK22-NEXT: shlxl %ecx, %edx, %ecx
+; FALLBACK22-NEXT: shrl %edx
+; FALLBACK22-NEXT: shrxl %eax, %edx, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %ecx, (%edx)
+; FALLBACK22-NEXT: movl %eax, 4(%edx)
+; FALLBACK22-NEXT: movl %ebp, 8(%edx)
+; FALLBACK22-NEXT: movl %esi, 12(%edx)
; FALLBACK22-NEXT: addl $44, %esp
; FALLBACK22-NEXT: popl %esi
; FALLBACK22-NEXT: popl %edi
@@ -2175,39 +2178,39 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %eax
; FALLBACK26-NEXT: shlb $3, %al
; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK26-NEXT: vmovaps %xmm1, (%esp)
; FALLBACK26-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: andb $12, %cl
-; FALLBACK26-NEXT: negb %cl
-; FALLBACK26-NEXT: movsbl %cl, %ecx
-; FALLBACK26-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
-; FALLBACK26-NEXT: movl 24(%esp,%ecx), %edx
-; FALLBACK26-NEXT: shlxl %eax, %edx, %edi
-; FALLBACK26-NEXT: movl %eax, %ebx
-; FALLBACK26-NEXT: notb %bl
-; FALLBACK26-NEXT: shrl %edx
-; FALLBACK26-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK26-NEXT: orl %esi, %edx
-; FALLBACK26-NEXT: movl 20(%esp,%ecx), %esi
-; FALLBACK26-NEXT: movl %esi, %ebp
+; FALLBACK26-NEXT: movl %eax, %ecx
+; FALLBACK26-NEXT: andb $12, %dl
+; FALLBACK26-NEXT: negb %dl
+; FALLBACK26-NEXT: movsbl %dl, %edx
+; FALLBACK26-NEXT: shlxl %ecx, 28(%esp,%edx), %edi
+; FALLBACK26-NEXT: notb %al
+; FALLBACK26-NEXT: movl 24(%esp,%edx), %esi
+; FALLBACK26-NEXT: shlxl %ecx, %esi, %ebx
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %eax, %esi, %esi
+; FALLBACK26-NEXT: orl %edi, %esi
+; FALLBACK26-NEXT: movl 20(%esp,%edx), %edi
+; FALLBACK26-NEXT: movl %edi, %ebp
; FALLBACK26-NEXT: shrl %ebp
-; FALLBACK26-NEXT: shrxl %ebx, %ebp, %ebp
-; FALLBACK26-NEXT: orl %edi, %ebp
-; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK26-NEXT: movl 16(%esp,%ecx), %ecx
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %eax
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %esi, %ecx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK26-NEXT: movl %eax, (%esi)
-; FALLBACK26-NEXT: movl %ecx, 4(%esi)
-; FALLBACK26-NEXT: movl %ebp, 8(%esi)
-; FALLBACK26-NEXT: movl %edx, 12(%esi)
+; FALLBACK26-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK26-NEXT: orl %ebx, %ebp
+; FALLBACK26-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK26-NEXT: movl 16(%esp,%edx), %edx
+; FALLBACK26-NEXT: shlxl %ecx, %edx, %ecx
+; FALLBACK26-NEXT: shrl %edx
+; FALLBACK26-NEXT: shrxl %eax, %edx, %eax
+; FALLBACK26-NEXT: orl %edi, %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %ecx, (%edx)
+; FALLBACK26-NEXT: movl %eax, 4(%edx)
+; FALLBACK26-NEXT: movl %ebp, 8(%edx)
+; FALLBACK26-NEXT: movl %esi, 12(%edx)
; FALLBACK26-NEXT: addl $44, %esp
; FALLBACK26-NEXT: popl %esi
; FALLBACK26-NEXT: popl %edi
@@ -2367,39 +2370,39 @@ define void @shl_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %eax
; FALLBACK30-NEXT: shlb $3, %al
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovaps %xmm1, (%esp)
; FALLBACK30-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: andb $12, %cl
-; FALLBACK30-NEXT: negb %cl
-; FALLBACK30-NEXT: movsbl %cl, %ecx
-; FALLBACK30-NEXT: shlxl %eax, 28(%esp,%ecx), %esi
-; FALLBACK30-NEXT: movl 24(%esp,%ecx), %edx
-; FALLBACK30-NEXT: shlxl %eax, %edx, %edi
-; FALLBACK30-NEXT: movl %eax, %ebx
-; FALLBACK30-NEXT: notb %bl
-; FALLBACK30-NEXT: shrl %edx
-; FALLBACK30-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK30-NEXT: orl %esi, %edx
-; FALLBACK30-NEXT: movl 20(%esp,%ecx), %esi
-; FALLBACK30-NEXT: movl %esi, %ebp
+; FALLBACK30-NEXT: movl %eax, %ecx
+; FALLBACK30-NEXT: andb $12, %dl
+; FALLBACK30-NEXT: negb %dl
+; FALLBACK30-NEXT: movsbl %dl, %edx
+; FALLBACK30-NEXT: shlxl %ecx, 28(%esp,%edx), %edi
+; FALLBACK30-NEXT: notb %al
+; FALLBACK30-NEXT: movl 24(%esp,%edx), %esi
+; FALLBACK30-NEXT: shlxl %ecx, %esi, %ebx
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %eax, %esi, %esi
+; FALLBACK30-NEXT: orl %edi, %esi
+; FALLBACK30-NEXT: movl 20(%esp,%edx), %edi
+; FALLBACK30-NEXT: movl %edi, %ebp
; FALLBACK30-NEXT: shrl %ebp
-; FALLBACK30-NEXT: shrxl %ebx, %ebp, %ebp
-; FALLBACK30-NEXT: orl %edi, %ebp
-; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK30-NEXT: movl 16(%esp,%ecx), %ecx
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %eax
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %esi, %ecx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK30-NEXT: movl %eax, (%esi)
-; FALLBACK30-NEXT: movl %ecx, 4(%esi)
-; FALLBACK30-NEXT: movl %ebp, 8(%esi)
-; FALLBACK30-NEXT: movl %edx, 12(%esi)
+; FALLBACK30-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK30-NEXT: orl %ebx, %ebp
+; FALLBACK30-NEXT: shlxl %ecx, %edi, %edi
+; FALLBACK30-NEXT: movl 16(%esp,%edx), %edx
+; FALLBACK30-NEXT: shlxl %ecx, %edx, %ecx
+; FALLBACK30-NEXT: shrl %edx
+; FALLBACK30-NEXT: shrxl %eax, %edx, %eax
+; FALLBACK30-NEXT: orl %edi, %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %ecx, (%edx)
+; FALLBACK30-NEXT: movl %eax, 4(%edx)
+; FALLBACK30-NEXT: movl %ebp, 8(%edx)
+; FALLBACK30-NEXT: movl %esi, 12(%edx)
; FALLBACK30-NEXT: addl $44, %esp
; FALLBACK30-NEXT: popl %esi
; FALLBACK30-NEXT: popl %edi
@@ -2833,31 +2836,31 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, %ecx
; X86-NO-SHLD-HAVE-BMI2-NEXT: andb $12, %bl
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movzbl %bl, %esi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 4(%esp,%esi), %edi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 8(%esp,%esi), %ebx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, %edi, %ebp
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, %edx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: notb %dl
-; X86-NO-SHLD-HAVE-BMI2-NEXT: leal (%ebx,%ebx), %ecx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %ecx, %ecx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %ecx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, (%esp,%esi), %ebp
-; X86-NO-SHLD-HAVE-BMI2-NEXT: addl %edi, %edi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %edi, %edi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %edi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %eax, %ebx, %ebx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 12(%esp,%esi), %esi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: sarxl %eax, %esi, %eax
-; X86-NO-SHLD-HAVE-BMI2-NEXT: addl %esi, %esi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %edx, %esi, %edx
-; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebx, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movzbl %bl, %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 4(%esp,%edi), %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 8(%esp,%edi), %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %ecx, %ebx, %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: notb %al
+; X86-NO-SHLD-HAVE-BMI2-NEXT: leal (%esi,%esi), %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %eax, %edx, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %edx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %ecx, (%esp,%edi), %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: addl %ebx, %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %eax, %ebx, %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %ebp, %ebx
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl 12(%esp,%edi), %edi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: leal (%edi,%edi), %ebp
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shlxl %eax, %ebp, %eax
+; X86-NO-SHLD-HAVE-BMI2-NEXT: shrxl %ecx, %esi, %esi
+; X86-NO-SHLD-HAVE-BMI2-NEXT: orl %esi, %eax
+; X86-NO-SHLD-HAVE-BMI2-NEXT: sarxl %ecx, %edi, %ecx
; X86-NO-SHLD-HAVE-BMI2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, 12(%esi)
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edx, 8(%esi)
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edi, (%esi)
-; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, 4(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ecx, 12(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %eax, 8(%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %ebx, (%esi)
+; X86-NO-SHLD-HAVE-BMI2-NEXT: movl %edx, 4(%esi)
; X86-NO-SHLD-HAVE-BMI2-NEXT: addl $44, %esp
; X86-NO-SHLD-HAVE-BMI2-NEXT: popl %esi
; X86-NO-SHLD-HAVE-BMI2-NEXT: popl %edi
@@ -3208,30 +3211,30 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $24, %sil
-; FALLBACK2-NEXT: movzbl %sil, %ecx
-; FALLBACK2-NEXT: movq -64(%rsp,%rcx), %rsi
-; FALLBACK2-NEXT: movq -56(%rsp,%rcx), %rdi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
-; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx), %r9
-; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK2-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK2-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movzbl %sil, %esi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %r8
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: shrxq %rcx, -72(%rsp,%rsi), %r9
; FALLBACK2-NEXT: addq %rdi, %rdi
; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: addq %rcx, %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: leaq (%rsi,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK2-NEXT: orq %r8, %rax
+; FALLBACK2-NEXT: shrxq %rcx, %rsi, %rcx
+; FALLBACK2-NEXT: movq %rcx, 24(%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, (%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: lshr_32bytes:
@@ -3355,30 +3358,30 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movl %eax, %esi
; FALLBACK6-NEXT: andb $24, %cl
; FALLBACK6-NEXT: movzbl %cl, %ecx
-; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK6-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK6-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK6-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK6-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: shrxq %rsi, -72(%rsp,%rcx), %rdi
; FALLBACK6-NEXT: notb %al
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r9, %rcx
-; FALLBACK6-NEXT: addq %r8, %r8
-; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq -64(%rsp,%rcx), %r8
+; FALLBACK6-NEXT: movq -56(%rsp,%rcx), %r9
+; FALLBACK6-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK6-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK6-NEXT: movq -48(%rsp,%rcx), %rcx
+; FALLBACK6-NEXT: leaq (%rcx,%rcx), %r11
+; FALLBACK6-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %rdi, %r11
+; FALLBACK6-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %rdi, %rax
+; FALLBACK6-NEXT: shrxq %rsi, %rcx, %rcx
+; FALLBACK6-NEXT: movq %rcx, 24(%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, (%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: lshr_32bytes:
@@ -3487,35 +3490,35 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK10-LABEL: lshr_32bytes:
; FALLBACK10: # %bb.0:
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK10-NEXT: movzbl (%rsi), %ecx
-; FALLBACK10-NEXT: leal (,%rcx,8), %eax
+; FALLBACK10-NEXT: movzbl (%rsi), %eax
+; FALLBACK10-NEXT: leal (,%rax,8), %ecx
; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: andb $24, %cl
-; FALLBACK10-NEXT: movzbl %cl, %ecx
-; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK10-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK10-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK10-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK10-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
-; FALLBACK10-NEXT: notb %al
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r9, %rcx
-; FALLBACK10-NEXT: addq %r8, %r8
-; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: movl %ecx, %esi
+; FALLBACK10-NEXT: andb $24, %al
+; FALLBACK10-NEXT: movzbl %al, %eax
+; FALLBACK10-NEXT: shrxq %rsi, -72(%rsp,%rax), %rdi
+; FALLBACK10-NEXT: notb %cl
+; FALLBACK10-NEXT: movq -64(%rsp,%rax), %r8
+; FALLBACK10-NEXT: movq -56(%rsp,%rax), %r9
+; FALLBACK10-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK10-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK10-NEXT: orq %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK10-NEXT: movq -48(%rsp,%rax), %rax
+; FALLBACK10-NEXT: leaq (%rax,%rax), %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK10-NEXT: orq %rdi, %r11
+; FALLBACK10-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK10-NEXT: orq %rdi, %rcx
+; FALLBACK10-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK10-NEXT: movq %rax, 24(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, (%rdx)
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -3623,35 +3626,35 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK14-LABEL: lshr_32bytes:
; FALLBACK14: # %bb.0:
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK14-NEXT: movzbl (%rsi), %ecx
-; FALLBACK14-NEXT: leal (,%rcx,8), %eax
+; FALLBACK14-NEXT: movzbl (%rsi), %eax
+; FALLBACK14-NEXT: leal (,%rax,8), %ecx
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: andb $24, %cl
-; FALLBACK14-NEXT: movzbl %cl, %ecx
-; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK14-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK14-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK14-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK14-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
-; FALLBACK14-NEXT: notb %al
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r9, %rcx
-; FALLBACK14-NEXT: addq %r8, %r8
-; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: movl %ecx, %esi
+; FALLBACK14-NEXT: andb $24, %al
+; FALLBACK14-NEXT: movzbl %al, %eax
+; FALLBACK14-NEXT: shrxq %rsi, -72(%rsp,%rax), %rdi
+; FALLBACK14-NEXT: notb %cl
+; FALLBACK14-NEXT: movq -64(%rsp,%rax), %r8
+; FALLBACK14-NEXT: movq -56(%rsp,%rax), %r9
+; FALLBACK14-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK14-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK14-NEXT: orq %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK14-NEXT: movq -48(%rsp,%rax), %rax
+; FALLBACK14-NEXT: leaq (%rax,%rax), %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK14-NEXT: orq %rdi, %r11
+; FALLBACK14-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK14-NEXT: orq %rdi, %rcx
+; FALLBACK14-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK14-NEXT: movq %rax, 24(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, (%rdx)
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -3914,81 +3917,75 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %ebx, %eax
-; FALLBACK18-NEXT: shlb $3, %al
+; FALLBACK18-NEXT: movl %ebx, %ecx
+; FALLBACK18-NEXT: shlb $3, %cl
; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, %eax
; FALLBACK18-NEXT: andb $28, %bl
-; FALLBACK18-NEXT: movzbl %bl, %edi
-; FALLBACK18-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK18-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %eax, %esi, %edx
+; FALLBACK18-NEXT: movzbl %bl, %esi
+; FALLBACK18-NEXT: movl 36(%esp,%esi), %edx
+; FALLBACK18-NEXT: movl 40(%esp,%esi), %ebp
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edi
+; FALLBACK18-NEXT: notb %cl
+; FALLBACK18-NEXT: leal (%ebp,%ebp), %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ebx
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, 32(%esp,%esi), %edi
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK18-NEXT: orl %edi, %edx
; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl %eax, %edx
-; FALLBACK18-NEXT: movl %eax, %ebx
-; FALLBACK18-NEXT: notb %dl
-; FALLBACK18-NEXT: leal (%ecx,%ecx), %ebp
-; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl %ebx, %ecx
-; FALLBACK18-NEXT: shrxl %ebx, 32(%esp,%edi), %ebx
-; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
-; FALLBACK18-NEXT: orl %ebx, %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 48(%esp,%edi), %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: leal (%eax,%eax), %ebx
-; FALLBACK18-NEXT: shlxl %edx, %ebx, %esi
-; FALLBACK18-NEXT: movl 44(%esp,%edi), %ebp
-; FALLBACK18-NEXT: movl %ecx, %eax
-; FALLBACK18-NEXT: shrxl %ecx, %ebp, %ebx
-; FALLBACK18-NEXT: orl %ebx, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %eax, %ebx
-; FALLBACK18-NEXT: addl %ebp, %ebp
-; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl 48(%esp,%esi), %edx
+; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %ebx
+; FALLBACK18-NEXT: movl 44(%esp,%esi), %edx
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edi
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, %ebp, %edi
+; FALLBACK18-NEXT: movl %eax, %ebp
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 56(%esp,%edi), %ebp
-; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK18-NEXT: movl 52(%esp,%edi), %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %esi
-; FALLBACK18-NEXT: orl %esi, %ecx
-; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 56(%esp,%esi), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK18-NEXT: movl 52(%esp,%esi), %eax
+; FALLBACK18-NEXT: shrxl %ebp, %eax, %ebx
+; FALLBACK18-NEXT: orl %ebx, %edx
+; FALLBACK18-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %eax, %eax
-; FALLBACK18-NEXT: shlxl %edx, %eax, %esi
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrxl %ebx, %ebp, %eax
-; FALLBACK18-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebx
-; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK18-NEXT: orl %eax, %edi
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl %ebx, 28(%eax)
-; FALLBACK18-NEXT: movl %edi, 24(%eax)
-; FALLBACK18-NEXT: movl %esi, 16(%eax)
-; FALLBACK18-NEXT: movl %ecx, 20(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 8(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 12(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, (%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: movl 60(%esp,%esi), %esi
+; FALLBACK18-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ecx
+; FALLBACK18-NEXT: shrxl %ebp, %edi, %edi
+; FALLBACK18-NEXT: orl %edi, %ecx
+; FALLBACK18-NEXT: shrxl %ebp, %esi, %esi
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edi
+; FALLBACK18-NEXT: movl %esi, 28(%edi)
+; FALLBACK18-NEXT: movl %ecx, 24(%edi)
+; FALLBACK18-NEXT: movl %eax, 16(%edi)
+; FALLBACK18-NEXT: movl %edx, 20(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 8(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 12(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, (%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 4(%edi)
; FALLBACK18-NEXT: addl $108, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -4261,72 +4258,70 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK22-NEXT: movups (%ecx), %xmm0
; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %edx
-; FALLBACK22-NEXT: shlb $3, %dl
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %ecx
+; FALLBACK22-NEXT: shlb $3, %cl
; FALLBACK22-NEXT: xorps %xmm2, %xmm2
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: andb $28, %cl
-; FALLBACK22-NEXT: movzbl %cl, %edi
-; FALLBACK22-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %edx, %eax
-; FALLBACK22-NEXT: notb %al
-; FALLBACK22-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK22-NEXT: movl %eax, %ebp
-; FALLBACK22-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK22-NEXT: shrxl %edx, %ecx, %ebx
-; FALLBACK22-NEXT: orl %ebx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK22-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK22-NEXT: movl %ecx, %eax
+; FALLBACK22-NEXT: andb $28, %dl
+; FALLBACK22-NEXT: movzbl %dl, %ebx
+; FALLBACK22-NEXT: shrxl %eax, 32(%esp,%ebx), %edx
+; FALLBACK22-NEXT: movl %eax, %edi
+; FALLBACK22-NEXT: notb %cl
+; FALLBACK22-NEXT: movl 36(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %eax, %ebx
-; FALLBACK22-NEXT: orl %ebx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK22-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK22-NEXT: shlxl %ebp, %ebx, %eax
-; FALLBACK22-NEXT: movl %ebp, %ecx
-; FALLBACK22-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK22-NEXT: shrxl %edx, %ebx, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: leal (%eax,%eax), %esi
+; FALLBACK22-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK22-NEXT: orl %edx, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK22-NEXT: addl %ebx, %ebx
+; FALLBACK22-NEXT: movl 48(%esp,%ebx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %edx
+; FALLBACK22-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK22-NEXT: movl 44(%esp,%ebx), %edx
+; FALLBACK22-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %edx, %edx
+; FALLBACK22-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK22-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK22-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK22-NEXT: movl %edi, %edx
+; FALLBACK22-NEXT: orl %esi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 56(%esp,%ebx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK22-NEXT: shlxl %ecx, %ebp, %ebp
+; FALLBACK22-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK22-NEXT: shrxl %edi, %eax, %edi
+; FALLBACK22-NEXT: orl %edi, %ebp
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ecx, %eax, %edi
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK22-NEXT: movl 60(%esp,%ebx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %ebx
; FALLBACK22-NEXT: shlxl %ecx, %ebx, %ebx
-; FALLBACK22-NEXT: orl %ebp, %ebx
-; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK22-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %eax
-; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: movl %ecx, %edx
-; FALLBACK22-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK22-NEXT: orl %ebp, %edi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %esi, %ecx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK22-NEXT: movl %eax, 28(%edx)
-; FALLBACK22-NEXT: movl %ecx, 4(%edx)
-; FALLBACK22-NEXT: movl %edi, 24(%edx)
-; FALLBACK22-NEXT: movl %ebx, 16(%edx)
+; FALLBACK22-NEXT: orl %eax, %ebx
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 20(%edx)
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: orl %ecx, %eax
+; FALLBACK22-NEXT: shrxl %edx, %esi, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %ecx, 28(%edx)
+; FALLBACK22-NEXT: movl %eax, 4(%edx)
+; FALLBACK22-NEXT: movl %ebx, 24(%edx)
+; FALLBACK22-NEXT: movl %edi, 16(%edx)
+; FALLBACK22-NEXT: movl %ebp, 20(%edx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK22-NEXT: movl %eax, 8(%edx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -4585,70 +4580,68 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %edx
-; FALLBACK26-NEXT: shlb $3, %dl
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %ecx
+; FALLBACK26-NEXT: shlb $3, %cl
; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: andb $28, %cl
-; FALLBACK26-NEXT: movzbl %cl, %edi
-; FALLBACK26-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %edx, %eax
-; FALLBACK26-NEXT: notb %al
-; FALLBACK26-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK26-NEXT: orl %ecx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK26-NEXT: movl %eax, %ebp
-; FALLBACK26-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK26-NEXT: shrxl %edx, %ecx, %ebx
-; FALLBACK26-NEXT: orl %ebx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK26-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: andb $28, %dl
+; FALLBACK26-NEXT: movzbl %dl, %ebx
+; FALLBACK26-NEXT: shrxl %eax, 32(%esp,%ebx), %edx
+; FALLBACK26-NEXT: movl %eax, %edi
+; FALLBACK26-NEXT: notb %cl
+; FALLBACK26-NEXT: movl 36(%esp,%ebx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %eax, %ebx
-; FALLBACK26-NEXT: orl %ebx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK26-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK26-NEXT: shlxl %ebp, %ebx, %eax
-; FALLBACK26-NEXT: movl %ebp, %ecx
-; FALLBACK26-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK26-NEXT: shrxl %edx, %ebx, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: leal (%eax,%eax), %esi
+; FALLBACK26-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK26-NEXT: orl %edx, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK26-NEXT: addl %ebx, %ebx
+; FALLBACK26-NEXT: movl 48(%esp,%ebx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %edx
+; FALLBACK26-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK26-NEXT: movl 44(%esp,%ebx), %edx
+; FALLBACK26-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %edx, %edx
+; FALLBACK26-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK26-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK26-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK26-NEXT: movl %edi, %edx
+; FALLBACK26-NEXT: orl %esi, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 56(%esp,%ebx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK26-NEXT: shlxl %ecx, %ebp, %ebp
+; FALLBACK26-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK26-NEXT: shrxl %edi, %eax, %edi
+; FALLBACK26-NEXT: orl %edi, %ebp
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ecx, %eax, %edi
+; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK26-NEXT: movl 60(%esp,%ebx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %ebx
; FALLBACK26-NEXT: shlxl %ecx, %ebx, %ebx
-; FALLBACK26-NEXT: orl %ebp, %ebx
-; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK26-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %eax
-; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: movl %ecx, %edx
-; FALLBACK26-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK26-NEXT: orl %ebp, %edi
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %esi, %ecx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK26-NEXT: movl %eax, 28(%edx)
-; FALLBACK26-NEXT: movl %ecx, 4(%edx)
-; FALLBACK26-NEXT: movl %edi, 24(%edx)
-; FALLBACK26-NEXT: movl %ebx, 16(%edx)
+; FALLBACK26-NEXT: orl %eax, %ebx
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 20(%edx)
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK26-NEXT: orl %ecx, %eax
+; FALLBACK26-NEXT: shrxl %edx, %esi, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %ecx, 28(%edx)
+; FALLBACK26-NEXT: movl %eax, 4(%edx)
+; FALLBACK26-NEXT: movl %ebx, 24(%edx)
+; FALLBACK26-NEXT: movl %edi, 16(%edx)
+; FALLBACK26-NEXT: movl %ebp, 20(%edx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK26-NEXT: movl %eax, 8(%edx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -4906,70 +4899,68 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %ymm0
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %edx
-; FALLBACK30-NEXT: shlb $3, %dl
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %ecx
+; FALLBACK30-NEXT: shlb $3, %cl
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: andb $28, %cl
-; FALLBACK30-NEXT: movzbl %cl, %edi
-; FALLBACK30-NEXT: shrxl %edx, 32(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %edx, %eax
-; FALLBACK30-NEXT: notb %al
-; FALLBACK30-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
-; FALLBACK30-NEXT: orl %ecx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK30-NEXT: movl %eax, %ebp
-; FALLBACK30-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK30-NEXT: shrxl %edx, %ecx, %ebx
-; FALLBACK30-NEXT: orl %ebx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %esi
-; FALLBACK30-NEXT: movl 40(%esp,%edi), %eax
+; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: andb $28, %dl
+; FALLBACK30-NEXT: movzbl %dl, %ebx
+; FALLBACK30-NEXT: shrxl %eax, 32(%esp,%ebx), %edx
+; FALLBACK30-NEXT: movl %eax, %edi
+; FALLBACK30-NEXT: notb %cl
+; FALLBACK30-NEXT: movl 36(%esp,%ebx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %eax, %ebx
-; FALLBACK30-NEXT: orl %ebx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK30-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK30-NEXT: shlxl %ebp, %ebx, %eax
-; FALLBACK30-NEXT: movl %ebp, %ecx
-; FALLBACK30-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK30-NEXT: shrxl %edx, %ebx, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: leal (%eax,%eax), %esi
+; FALLBACK30-NEXT: shlxl %ecx, %esi, %eax
+; FALLBACK30-NEXT: orl %edx, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK30-NEXT: addl %ebx, %ebx
+; FALLBACK30-NEXT: movl 48(%esp,%ebx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %edx
+; FALLBACK30-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK30-NEXT: movl 44(%esp,%ebx), %edx
+; FALLBACK30-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %edx, %edx
+; FALLBACK30-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK30-NEXT: movl 40(%esp,%ebx), %edx
+; FALLBACK30-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %edi, %edx, %esi
+; FALLBACK30-NEXT: movl %edi, %edx
+; FALLBACK30-NEXT: orl %esi, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 56(%esp,%ebx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK30-NEXT: shlxl %ecx, %ebp, %ebp
+; FALLBACK30-NEXT: movl 52(%esp,%ebx), %eax
+; FALLBACK30-NEXT: shrxl %edi, %eax, %edi
+; FALLBACK30-NEXT: orl %edi, %ebp
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ecx, %eax, %edi
+; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK30-NEXT: movl 60(%esp,%ebx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %ebx
; FALLBACK30-NEXT: shlxl %ecx, %ebx, %ebx
-; FALLBACK30-NEXT: orl %ebp, %ebx
-; FALLBACK30-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK30-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %eax
-; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: movl %ecx, %edx
-; FALLBACK30-NEXT: shlxl %ecx, %edi, %edi
-; FALLBACK30-NEXT: orl %ebp, %edi
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %esi, %ecx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK30-NEXT: movl %eax, 28(%edx)
-; FALLBACK30-NEXT: movl %ecx, 4(%edx)
-; FALLBACK30-NEXT: movl %edi, 24(%edx)
-; FALLBACK30-NEXT: movl %ebx, 16(%edx)
+; FALLBACK30-NEXT: orl %eax, %ebx
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 20(%edx)
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK30-NEXT: orl %ecx, %eax
+; FALLBACK30-NEXT: shrxl %edx, %esi, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %ecx, 28(%edx)
+; FALLBACK30-NEXT: movl %eax, 4(%edx)
+; FALLBACK30-NEXT: movl %ebx, 24(%edx)
+; FALLBACK30-NEXT: movl %edi, 16(%edx)
+; FALLBACK30-NEXT: movl %ebp, 20(%edx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK30-NEXT: movl %eax, 8(%edx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -5157,30 +5148,30 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $6, %sil
-; FALLBACK2-NEXT: movzbl %sil, %ecx
-; FALLBACK2-NEXT: movq -64(%rsp,%rcx,4), %rsi
-; FALLBACK2-NEXT: movq -56(%rsp,%rcx,4), %rdi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
-; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %r9
-; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK2-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK2-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movzbl %sil, %esi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi,4), %rdi
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi,4), %r8
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %r9
; FALLBACK2-NEXT: addq %rdi, %rdi
; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: addq %rcx, %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK2-NEXT: leaq (%rsi,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK2-NEXT: orq %r8, %rax
+; FALLBACK2-NEXT: shrxq %rcx, %rsi, %rcx
+; FALLBACK2-NEXT: movq %rcx, 24(%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, (%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: lshr_32bytes_dwordOff:
@@ -5307,30 +5298,30 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movl %eax, %esi
; FALLBACK6-NEXT: andb $6, %cl
; FALLBACK6-NEXT: movzbl %cl, %ecx
-; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK6-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK6-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK6-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK6-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: shrxq %rsi, -72(%rsp,%rcx,4), %rdi
; FALLBACK6-NEXT: notb %al
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r9, %rcx
-; FALLBACK6-NEXT: addq %r8, %r8
-; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq -64(%rsp,%rcx,4), %r8
+; FALLBACK6-NEXT: movq -56(%rsp,%rcx,4), %r9
+; FALLBACK6-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK6-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK6-NEXT: movq -48(%rsp,%rcx,4), %rcx
+; FALLBACK6-NEXT: leaq (%rcx,%rcx), %r11
+; FALLBACK6-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %rdi, %r11
+; FALLBACK6-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %rdi, %rax
+; FALLBACK6-NEXT: shrxq %rsi, %rcx, %rcx
+; FALLBACK6-NEXT: movq %rcx, 24(%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, (%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: lshr_32bytes_dwordOff:
@@ -5441,36 +5432,36 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK10-LABEL: lshr_32bytes_dwordOff:
; FALLBACK10: # %bb.0:
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK10-NEXT: movzbl (%rsi), %ecx
-; FALLBACK10-NEXT: movl %ecx, %eax
-; FALLBACK10-NEXT: shlb $5, %al
+; FALLBACK10-NEXT: movzbl (%rsi), %eax
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: shlb $5, %cl
; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: andb $6, %cl
-; FALLBACK10-NEXT: movzbl %cl, %ecx
-; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK10-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK10-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK10-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK10-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
-; FALLBACK10-NEXT: notb %al
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r9, %rcx
-; FALLBACK10-NEXT: addq %r8, %r8
-; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: movl %ecx, %esi
+; FALLBACK10-NEXT: andb $6, %al
+; FALLBACK10-NEXT: movzbl %al, %eax
+; FALLBACK10-NEXT: shrxq %rsi, -72(%rsp,%rax,4), %rdi
+; FALLBACK10-NEXT: notb %cl
+; FALLBACK10-NEXT: movq -64(%rsp,%rax,4), %r8
+; FALLBACK10-NEXT: movq -56(%rsp,%rax,4), %r9
+; FALLBACK10-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK10-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK10-NEXT: orq %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK10-NEXT: movq -48(%rsp,%rax,4), %rax
+; FALLBACK10-NEXT: leaq (%rax,%rax), %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK10-NEXT: orq %rdi, %r11
+; FALLBACK10-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK10-NEXT: orq %rdi, %rcx
+; FALLBACK10-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK10-NEXT: movq %rax, 24(%rdx)
+; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, (%rdx)
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -5580,36 +5571,36 @@ define void @lshr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK14-LABEL: lshr_32bytes_dwordOff:
; FALLBACK14: # %bb.0:
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK14-NEXT: movzbl (%rsi), %ecx
-; FALLBACK14-NEXT: movl %ecx, %eax
-; FALLBACK14-NEXT: shlb $5, %al
+; FALLBACK14-NEXT: movzbl (%rsi), %eax
+; FALLBACK14-NEXT: movl %eax, %ecx
+; FALLBACK14-NEXT: shlb $5, %cl
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: andb $6, %cl
-; FALLBACK14-NEXT: movzbl %cl, %ecx
-; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK14-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK14-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK14-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK14-NEXT: shrxq %rax, %rcx, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
-; FALLBACK14-NEXT: notb %al
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r9, %rcx
-; FALLBACK14-NEXT: addq %r8, %r8
-; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: movl %ecx, %esi
+; FALLBACK14-NEXT: andb $6, %al
+; FALLBACK14-NEXT: movzbl %al, %eax
+; FALLBACK14-NEXT: shrxq %rsi, -72(%rsp,%rax,4), %rdi
+; FALLBACK14-NEXT: notb %cl
+; FALLBACK14-NEXT: movq -64(%rsp,%rax,4), %r8
+; FALLBACK14-NEXT: movq -56(%rsp,%rax,4), %r9
+; FALLBACK14-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK14-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK14-NEXT: orq %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rsi, %r9, %rdi
+; FALLBACK14-NEXT: movq -48(%rsp,%rax,4), %rax
+; FALLBACK14-NEXT: leaq (%rax,%rax), %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK14-NEXT: orq %rdi, %r11
+; FALLBACK14-NEXT: shrxq %rsi, %r8, %rdi
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK14-NEXT: orq %rdi, %rcx
+; FALLBACK14-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK14-NEXT: movq %rax, 24(%rdx)
+; FALLBACK14-NEXT: movq %rcx, 8(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, (%rdx)
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -6025,31 +6016,31 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $24, %sil
; FALLBACK2-NEXT: negb %sil
-; FALLBACK2-NEXT: movsbq %sil, %rsi
-; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
-; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %r8
-; FALLBACK2-NEXT: shlxq %rax, -16(%rsp,%rsi), %r9
-; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %r10
-; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movsbq %sil, %rdi
+; FALLBACK2-NEXT: movq -40(%rsp,%rdi), %r8
+; FALLBACK2-NEXT: movq -32(%rsp,%rdi), %rsi
+; FALLBACK2-NEXT: shlxq %rcx, %rsi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK2-NEXT: shrq %r8
+; FALLBACK2-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK2-NEXT: orq %r9, %r8
+; FALLBACK2-NEXT: shlxq %rcx, -16(%rsp,%rdi), %r9
+; FALLBACK2-NEXT: movq -24(%rsp,%rdi), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK2-NEXT: shrq %rdi
; FALLBACK2-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: orq %r9, %rdi
; FALLBACK2-NEXT: shrq %rsi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: shrq %rcx
-; FALLBACK2-NEXT: shrxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, (%rdx)
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %rax
+; FALLBACK2-NEXT: orq %rcx, %rax
+; FALLBACK2-NEXT: movq %r10, (%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, 24(%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK2-NEXT: movq %r8, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: shl_32bytes:
@@ -6167,38 +6158,38 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6: # %bb.0:
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
-; FALLBACK6-NEXT: movzbl (%rsi), %ecx
-; FALLBACK6-NEXT: leal (,%rcx,8), %eax
+; FALLBACK6-NEXT: movzbl (%rsi), %esi
+; FALLBACK6-NEXT: leal (,%rsi,8), %eax
; FALLBACK6-NEXT: xorps %xmm2, %xmm2
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: andb $24, %cl
-; FALLBACK6-NEXT: negb %cl
-; FALLBACK6-NEXT: movsbq %cl, %rcx
-; FALLBACK6-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK6-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK6-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK6-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK6-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: movl %eax, %ecx
+; FALLBACK6-NEXT: andb $24, %sil
+; FALLBACK6-NEXT: negb %sil
+; FALLBACK6-NEXT: movsbq %sil, %rsi
+; FALLBACK6-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK6-NEXT: shrq %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK6-NEXT: orq %rdi, %r8
+; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK6-NEXT: shrq %rsi
+; FALLBACK6-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK6-NEXT: orq %r9, %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK6-NEXT: shrq %rdi
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: shrq %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r8, %rcx
-; FALLBACK6-NEXT: shrq %r9
-; FALLBACK6-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, (%rdx)
+; FALLBACK6-NEXT: movq %rcx, (%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK6-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK6-NEXT: movq %r8, 24(%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: shl_32bytes:
@@ -6308,36 +6299,36 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK10-LABEL: shl_32bytes:
; FALLBACK10: # %bb.0:
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK10-NEXT: movzbl (%rsi), %ecx
-; FALLBACK10-NEXT: leal (,%rcx,8), %eax
+; FALLBACK10-NEXT: movzbl (%rsi), %esi
+; FALLBACK10-NEXT: leal (,%rsi,8), %eax
; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: andb $24, %cl
-; FALLBACK10-NEXT: negb %cl
-; FALLBACK10-NEXT: movsbq %cl, %rcx
-; FALLBACK10-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK10-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK10-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK10-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK10-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: andb $24, %sil
+; FALLBACK10-NEXT: negb %sil
+; FALLBACK10-NEXT: movsbq %sil, %rsi
+; FALLBACK10-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK10-NEXT: shrq %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK10-NEXT: orq %rdi, %r8
+; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK10-NEXT: shrq %rsi
+; FALLBACK10-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK10-NEXT: orq %r9, %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK10-NEXT: shrq %rdi
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: shrq %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r8, %rcx
-; FALLBACK10-NEXT: shrq %r9
-; FALLBACK10-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, (%rdx)
+; FALLBACK10-NEXT: movq %rcx, (%rdx)
; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK10-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK10-NEXT: movq %r8, 24(%rdx)
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -6446,36 +6437,36 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK14-LABEL: shl_32bytes:
; FALLBACK14: # %bb.0:
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK14-NEXT: movzbl (%rsi), %ecx
-; FALLBACK14-NEXT: leal (,%rcx,8), %eax
+; FALLBACK14-NEXT: movzbl (%rsi), %esi
+; FALLBACK14-NEXT: leal (,%rsi,8), %eax
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: andb $24, %cl
-; FALLBACK14-NEXT: negb %cl
-; FALLBACK14-NEXT: movsbq %cl, %rcx
-; FALLBACK14-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK14-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK14-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK14-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK14-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: movl %eax, %ecx
+; FALLBACK14-NEXT: andb $24, %sil
+; FALLBACK14-NEXT: negb %sil
+; FALLBACK14-NEXT: movsbq %sil, %rsi
+; FALLBACK14-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK14-NEXT: shrq %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK14-NEXT: orq %rdi, %r8
+; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK14-NEXT: shrq %rsi
+; FALLBACK14-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK14-NEXT: orq %r9, %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK14-NEXT: shrq %rdi
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: shrq %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r8, %rcx
-; FALLBACK14-NEXT: shrq %r9
-; FALLBACK14-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, (%rdx)
+; FALLBACK14-NEXT: movq %rcx, (%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK14-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK14-NEXT: movq %r8, 24(%rdx)
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -6745,71 +6736,75 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %edx, %eax
+; FALLBACK18-NEXT: movl %eax, %ebp
; FALLBACK18-NEXT: andb $28, %bl
; FALLBACK18-NEXT: negb %bl
; FALLBACK18-NEXT: movsbl %bl, %esi
; FALLBACK18-NEXT: movl 64(%esp,%esi), %ebx
; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 68(%esp,%esi), %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, %eax, %edi
-; FALLBACK18-NEXT: movl %edx, %ecx
-; FALLBACK18-NEXT: notb %cl
+; FALLBACK18-NEXT: movl 68(%esp,%esi), %ecx
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %eax, %ecx, %edi
+; FALLBACK18-NEXT: notb %dl
; FALLBACK18-NEXT: shrl %ebx
-; FALLBACK18-NEXT: shrxl %ecx, %ebx, %ebx
+; FALLBACK18-NEXT: shrxl %edx, %ebx, %ebx
; FALLBACK18-NEXT: orl %edi, %ebx
; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 72(%esp,%esi), %ebx
; FALLBACK18-NEXT: movl %ebx, %edi
; FALLBACK18-NEXT: shrl %edi
-; FALLBACK18-NEXT: shrxl %ecx, %edi, %eax
+; FALLBACK18-NEXT: shrxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 76(%esp,%esi), %edi
-; FALLBACK18-NEXT: shlxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: movl %ebp, %esi
+; FALLBACK18-NEXT: shlxl %ebp, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, %ebx, %ebx
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ecx, %eax, %eax
-; FALLBACK18-NEXT: orl %ebx, %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 80(%esp,%esi), %ebx
-; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %esi, %ebx, %ebx
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %edx, %ecx, %ecx
+; FALLBACK18-NEXT: orl %ebx, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
+; FALLBACK18-NEXT: movl 80(%esp,%ebp), %ecx
+; FALLBACK18-NEXT: movl %ecx, %ebx
; FALLBACK18-NEXT: shrl %ebx
-; FALLBACK18-NEXT: shrxl %ecx, %ebx, %eax
-; FALLBACK18-NEXT: movl 84(%esp,%esi), %ebx
-; FALLBACK18-NEXT: shlxl %edx, %ebx, %ebp
+; FALLBACK18-NEXT: shrxl %edx, %ebx, %eax
+; FALLBACK18-NEXT: movl 84(%esp,%ebp), %ebx
+; FALLBACK18-NEXT: shlxl %esi, %ebx, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %esi, %ecx, %ecx
+; FALLBACK18-NEXT: movl %esi, %eax
; FALLBACK18-NEXT: shrl %edi
-; FALLBACK18-NEXT: shrxl %ecx, %edi, %edi
-; FALLBACK18-NEXT: orl %eax, %edi
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, 92(%esp,%esi), %ebp
-; FALLBACK18-NEXT: movl 88(%esp,%esi), %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK18-NEXT: shrxl %edx, %edi, %edi
+; FALLBACK18-NEXT: orl %ecx, %edi
+; FALLBACK18-NEXT: shlxl %esi, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: shlxl %esi, 92(%esp,%ecx), %ebp
+; FALLBACK18-NEXT: movl 88(%esp,%ecx), %esi
+; FALLBACK18-NEXT: shlxl %eax, %esi, %ecx
; FALLBACK18-NEXT: shrl %esi
-; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
; FALLBACK18-NEXT: orl %ebp, %esi
; FALLBACK18-NEXT: shrl %ebx
-; FALLBACK18-NEXT: shrxl %ecx, %ebx, %edx
-; FALLBACK18-NEXT: orl %eax, %edx
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, (%eax)
-; FALLBACK18-NEXT: movl %edx, 24(%eax)
-; FALLBACK18-NEXT: movl %esi, 28(%eax)
-; FALLBACK18-NEXT: movl %edi, 16(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 20(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 8(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 12(%eax)
+; FALLBACK18-NEXT: shrxl %edx, %ebx, %eax
+; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edx
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: movl %ecx, (%edx)
+; FALLBACK18-NEXT: movl %eax, 24(%edx)
+; FALLBACK18-NEXT: movl %esi, 28(%edx)
+; FALLBACK18-NEXT: movl %edi, 16(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 20(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 8(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 12(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 4(%edx)
; FALLBACK18-NEXT: addl $108, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -7085,78 +7080,76 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK22-NEXT: movups (%ecx), %xmm0
; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %eax
-; FALLBACK22-NEXT: shlb $3, %al
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %ecx
+; FALLBACK22-NEXT: shlb $3, %cl
; FALLBACK22-NEXT: xorps %xmm2, %xmm2
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: andb $28, %cl
-; FALLBACK22-NEXT: negb %cl
-; FALLBACK22-NEXT: movsbl %cl, %edx
-; FALLBACK22-NEXT: movl 84(%esp,%edx), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK22-NEXT: movl 80(%esp,%edx), %esi
-; FALLBACK22-NEXT: shlxl %eax, %esi, %edi
-; FALLBACK22-NEXT: movl %eax, %ebx
-; FALLBACK22-NEXT: notb %bl
-; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 76(%esp,%edx), %ecx
-; FALLBACK22-NEXT: movl %ecx, %esi
-; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %edi, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK22-NEXT: movl 72(%esp,%edx), %esi
-; FALLBACK22-NEXT: movl %esi, %edi
+; FALLBACK22-NEXT: movl %ecx, %ebx
+; FALLBACK22-NEXT: andb $28, %dl
+; FALLBACK22-NEXT: negb %dl
+; FALLBACK22-NEXT: movsbl %dl, %edx
+; FALLBACK22-NEXT: movl 84(%esp,%edx), %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %esi
+; FALLBACK22-NEXT: notb %cl
+; FALLBACK22-NEXT: movl 80(%esp,%edx), %edi
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: shrl %edi
-; FALLBACK22-NEXT: shrxl %ebx, %edi, %edi
-; FALLBACK22-NEXT: orl %ecx, %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: orl %esi, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %eax, %esi, %ecx
-; FALLBACK22-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK22-NEXT: movl 76(%esp,%edx), %esi
; FALLBACK22-NEXT: movl %esi, %edi
; FALLBACK22-NEXT: shrl %edi
-; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
-; FALLBACK22-NEXT: orl %ecx, %ebp
-; FALLBACK22-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: orl %ebp, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: movl 72(%esp,%edx), %edi
+; FALLBACK22-NEXT: movl %edi, %ebp
+; FALLBACK22-NEXT: shrl %ebp
+; FALLBACK22-NEXT: shrxl %ecx, %ebp, %ebp
+; FALLBACK22-NEXT: orl %esi, %ebp
+; FALLBACK22-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %ebx, %edi, %esi
+; FALLBACK22-NEXT: movl 68(%esp,%edx), %ebp
+; FALLBACK22-NEXT: movl %ebp, %edi
+; FALLBACK22-NEXT: shrl %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %edi
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebp
; FALLBACK22-NEXT: movl 64(%esp,%edx), %esi
-; FALLBACK22-NEXT: movl %esi, %ecx
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %edi, %ecx
-; FALLBACK22-NEXT: shlxl %eax, %esi, %esi
; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %eax, 92(%esp,%edx), %edi
-; FALLBACK22-NEXT: movl 88(%esp,%edx), %edx
-; FALLBACK22-NEXT: shlxl %eax, %edx, %esi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK22-NEXT: orl %ebp, %edi
; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: orl %esi, %eax
-; FALLBACK22-NEXT: shrl %edx
-; FALLBACK22-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK22-NEXT: orl %edi, %edx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; FALLBACK22-NEXT: movl %edi, (%esi)
-; FALLBACK22-NEXT: movl %edx, 28(%esi)
-; FALLBACK22-NEXT: movl %eax, 24(%esi)
-; FALLBACK22-NEXT: movl %ecx, 4(%esi)
-; FALLBACK22-NEXT: movl %ebp, 8(%esi)
+; FALLBACK22-NEXT: shrxl %ecx, %eax, %esi
+; FALLBACK22-NEXT: movl 88(%esp,%edx), %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %ebp
+; FALLBACK22-NEXT: orl %ebp, %esi
+; FALLBACK22-NEXT: shlxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK22-NEXT: shlxl %ebx, 92(%esp,%edx), %edx
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ecx, %eax, %eax
+; FALLBACK22-NEXT: orl %edx, %eax
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK22-NEXT: movl %ebp, (%ecx)
+; FALLBACK22-NEXT: movl %eax, 28(%ecx)
+; FALLBACK22-NEXT: movl %esi, 24(%ecx)
+; FALLBACK22-NEXT: movl %edi, 4(%ecx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 8(%ecx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 12(%esi)
+; FALLBACK22-NEXT: movl %eax, 12(%ecx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 16(%esi)
+; FALLBACK22-NEXT: movl %eax, 16(%ecx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 20(%esi)
+; FALLBACK22-NEXT: movl %eax, 20(%ecx)
; FALLBACK22-NEXT: addl $108, %esp
; FALLBACK22-NEXT: popl %esi
; FALLBACK22-NEXT: popl %edi
@@ -7410,76 +7403,74 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK26-NEXT: vmovups (%ecx), %ymm0
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %eax
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %eax
; FALLBACK26-NEXT: shlb $3, %al
; FALLBACK26-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: andb $28, %cl
-; FALLBACK26-NEXT: negb %cl
-; FALLBACK26-NEXT: movsbl %cl, %edx
-; FALLBACK26-NEXT: movl 84(%esp,%edx), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK26-NEXT: movl 80(%esp,%edx), %esi
-; FALLBACK26-NEXT: shlxl %eax, %esi, %edi
; FALLBACK26-NEXT: movl %eax, %ebx
-; FALLBACK26-NEXT: notb %bl
-; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %ecx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 76(%esp,%edx), %ecx
-; FALLBACK26-NEXT: movl %ecx, %esi
-; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %edi, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK26-NEXT: movl 72(%esp,%edx), %esi
-; FALLBACK26-NEXT: movl %esi, %edi
+; FALLBACK26-NEXT: andb $28, %dl
+; FALLBACK26-NEXT: negb %dl
+; FALLBACK26-NEXT: movsbl %dl, %edx
+; FALLBACK26-NEXT: movl 84(%esp,%edx), %ecx
+; FALLBACK26-NEXT: shlxl %ebx, %ecx, %esi
+; FALLBACK26-NEXT: notb %al
+; FALLBACK26-NEXT: movl 80(%esp,%edx), %edi
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: shrl %edi
-; FALLBACK26-NEXT: shrxl %ebx, %edi, %edi
-; FALLBACK26-NEXT: orl %ecx, %edi
+; FALLBACK26-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK26-NEXT: orl %esi, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %eax, %esi, %ecx
-; FALLBACK26-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK26-NEXT: movl 76(%esp,%edx), %esi
; FALLBACK26-NEXT: movl %esi, %edi
; FALLBACK26-NEXT: shrl %edi
-; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
-; FALLBACK26-NEXT: orl %ecx, %ebp
-; FALLBACK26-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK26-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK26-NEXT: orl %ebp, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: movl 72(%esp,%edx), %edi
+; FALLBACK26-NEXT: movl %edi, %ebp
+; FALLBACK26-NEXT: shrl %ebp
+; FALLBACK26-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK26-NEXT: orl %esi, %ebp
+; FALLBACK26-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %ebx, %edi, %esi
+; FALLBACK26-NEXT: movl 68(%esp,%edx), %ebp
+; FALLBACK26-NEXT: movl %ebp, %edi
+; FALLBACK26-NEXT: shrl %edi
+; FALLBACK26-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebp
; FALLBACK26-NEXT: movl 64(%esp,%edx), %esi
-; FALLBACK26-NEXT: movl %esi, %ecx
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %edi, %ecx
-; FALLBACK26-NEXT: shlxl %eax, %esi, %esi
; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %eax, 92(%esp,%edx), %edi
-; FALLBACK26-NEXT: movl 88(%esp,%edx), %edx
-; FALLBACK26-NEXT: shlxl %eax, %edx, %esi
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %eax, %esi, %edi
+; FALLBACK26-NEXT: orl %ebp, %edi
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %eax, %ecx, %esi
+; FALLBACK26-NEXT: movl 88(%esp,%edx), %ecx
+; FALLBACK26-NEXT: shlxl %ebx, %ecx, %ebp
+; FALLBACK26-NEXT: orl %ebp, %esi
+; FALLBACK26-NEXT: shlxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK26-NEXT: shlxl %ebx, 92(%esp,%edx), %edx
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %eax, %ecx, %eax
+; FALLBACK26-NEXT: orl %edx, %eax
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK26-NEXT: movl %ebp, (%ecx)
+; FALLBACK26-NEXT: movl %eax, 28(%ecx)
+; FALLBACK26-NEXT: movl %esi, 24(%ecx)
+; FALLBACK26-NEXT: movl %edi, 4(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: orl %esi, %eax
-; FALLBACK26-NEXT: shrl %edx
-; FALLBACK26-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK26-NEXT: orl %edi, %edx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; FALLBACK26-NEXT: movl %edi, (%esi)
-; FALLBACK26-NEXT: movl %edx, 28(%esi)
-; FALLBACK26-NEXT: movl %eax, 24(%esi)
-; FALLBACK26-NEXT: movl %ecx, 4(%esi)
-; FALLBACK26-NEXT: movl %ebp, 8(%esi)
+; FALLBACK26-NEXT: movl %eax, 8(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 12(%esi)
+; FALLBACK26-NEXT: movl %eax, 12(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 16(%esi)
+; FALLBACK26-NEXT: movl %eax, 16(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 20(%esi)
+; FALLBACK26-NEXT: movl %eax, 20(%ecx)
; FALLBACK26-NEXT: addl $108, %esp
; FALLBACK26-NEXT: popl %esi
; FALLBACK26-NEXT: popl %edi
@@ -7732,76 +7723,74 @@ define void @shl_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %ymm0
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %eax
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %eax
; FALLBACK30-NEXT: shlb $3, %al
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: andb $28, %cl
-; FALLBACK30-NEXT: negb %cl
-; FALLBACK30-NEXT: movsbl %cl, %edx
-; FALLBACK30-NEXT: movl 84(%esp,%edx), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK30-NEXT: movl 80(%esp,%edx), %esi
-; FALLBACK30-NEXT: shlxl %eax, %esi, %edi
; FALLBACK30-NEXT: movl %eax, %ebx
-; FALLBACK30-NEXT: notb %bl
-; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %ecx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 76(%esp,%edx), %ecx
-; FALLBACK30-NEXT: movl %ecx, %esi
-; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %edi, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %eax, %ecx, %ecx
-; FALLBACK30-NEXT: movl 72(%esp,%edx), %esi
-; FALLBACK30-NEXT: movl %esi, %edi
+; FALLBACK30-NEXT: andb $28, %dl
+; FALLBACK30-NEXT: negb %dl
+; FALLBACK30-NEXT: movsbl %dl, %edx
+; FALLBACK30-NEXT: movl 84(%esp,%edx), %ecx
+; FALLBACK30-NEXT: shlxl %ebx, %ecx, %esi
+; FALLBACK30-NEXT: notb %al
+; FALLBACK30-NEXT: movl 80(%esp,%edx), %edi
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: shrl %edi
-; FALLBACK30-NEXT: shrxl %ebx, %edi, %edi
-; FALLBACK30-NEXT: orl %ecx, %edi
+; FALLBACK30-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK30-NEXT: orl %esi, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %eax, %esi, %ecx
-; FALLBACK30-NEXT: movl 68(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl 76(%esp,%edx), %esi
; FALLBACK30-NEXT: movl %esi, %edi
; FALLBACK30-NEXT: shrl %edi
-; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
-; FALLBACK30-NEXT: orl %ecx, %ebp
-; FALLBACK30-NEXT: shlxl %eax, %esi, %edi
+; FALLBACK30-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK30-NEXT: orl %ebp, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: movl 72(%esp,%edx), %edi
+; FALLBACK30-NEXT: movl %edi, %ebp
+; FALLBACK30-NEXT: shrl %ebp
+; FALLBACK30-NEXT: shrxl %eax, %ebp, %ebp
+; FALLBACK30-NEXT: orl %esi, %ebp
+; FALLBACK30-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %ebx, %edi, %esi
+; FALLBACK30-NEXT: movl 68(%esp,%edx), %ebp
+; FALLBACK30-NEXT: movl %ebp, %edi
+; FALLBACK30-NEXT: shrl %edi
+; FALLBACK30-NEXT: shrxl %eax, %edi, %edi
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebp
; FALLBACK30-NEXT: movl 64(%esp,%edx), %esi
-; FALLBACK30-NEXT: movl %esi, %ecx
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %edi, %ecx
-; FALLBACK30-NEXT: shlxl %eax, %esi, %esi
; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %eax, 92(%esp,%edx), %edi
-; FALLBACK30-NEXT: movl 88(%esp,%edx), %edx
-; FALLBACK30-NEXT: shlxl %eax, %edx, %esi
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %eax, %esi, %edi
+; FALLBACK30-NEXT: orl %ebp, %edi
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %eax, %ecx, %esi
+; FALLBACK30-NEXT: movl 88(%esp,%edx), %ecx
+; FALLBACK30-NEXT: shlxl %ebx, %ecx, %ebp
+; FALLBACK30-NEXT: orl %ebp, %esi
+; FALLBACK30-NEXT: shlxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK30-NEXT: shlxl %ebx, 92(%esp,%edx), %edx
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %eax, %ecx, %eax
+; FALLBACK30-NEXT: orl %edx, %eax
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK30-NEXT: movl %ebp, (%ecx)
+; FALLBACK30-NEXT: movl %eax, 28(%ecx)
+; FALLBACK30-NEXT: movl %esi, 24(%ecx)
+; FALLBACK30-NEXT: movl %edi, 4(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: orl %esi, %eax
-; FALLBACK30-NEXT: shrl %edx
-; FALLBACK30-NEXT: shrxl %ebx, %edx, %edx
-; FALLBACK30-NEXT: orl %edi, %edx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %esi
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
-; FALLBACK30-NEXT: movl %edi, (%esi)
-; FALLBACK30-NEXT: movl %edx, 28(%esi)
-; FALLBACK30-NEXT: movl %eax, 24(%esi)
-; FALLBACK30-NEXT: movl %ecx, 4(%esi)
-; FALLBACK30-NEXT: movl %ebp, 8(%esi)
+; FALLBACK30-NEXT: movl %eax, 8(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 12(%esi)
+; FALLBACK30-NEXT: movl %eax, 12(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 16(%esi)
+; FALLBACK30-NEXT: movl %eax, 16(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 20(%esi)
+; FALLBACK30-NEXT: movl %eax, 20(%ecx)
; FALLBACK30-NEXT: addl $108, %esp
; FALLBACK30-NEXT: popl %esi
; FALLBACK30-NEXT: popl %edi
@@ -7987,32 +7976,32 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
; FALLBACK2-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: shlb $2, %sil
; FALLBACK2-NEXT: andb $24, %sil
; FALLBACK2-NEXT: negb %sil
-; FALLBACK2-NEXT: movsbq %sil, %rsi
-; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
-; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %r8
-; FALLBACK2-NEXT: shlxq %rax, -16(%rsp,%rsi), %r9
-; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %r10
-; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movsbq %sil, %rdi
+; FALLBACK2-NEXT: movq -40(%rsp,%rdi), %r8
+; FALLBACK2-NEXT: movq -32(%rsp,%rdi), %rsi
+; FALLBACK2-NEXT: shlxq %rcx, %rsi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK2-NEXT: shrq %r8
+; FALLBACK2-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK2-NEXT: orq %r9, %r8
+; FALLBACK2-NEXT: shlxq %rcx, -16(%rsp,%rdi), %r9
+; FALLBACK2-NEXT: movq -24(%rsp,%rdi), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK2-NEXT: shrq %rdi
; FALLBACK2-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
+; FALLBACK2-NEXT: orq %r9, %rdi
; FALLBACK2-NEXT: shrq %rsi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: shrq %rcx
-; FALLBACK2-NEXT: shrxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, (%rdx)
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %rax
+; FALLBACK2-NEXT: orq %rcx, %rax
+; FALLBACK2-NEXT: movq %r10, (%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, 24(%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK2-NEXT: movq %r8, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: shl_32bytes_dwordOff:
@@ -8135,40 +8124,40 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
; FALLBACK6: # %bb.0:
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
-; FALLBACK6-NEXT: movzbl (%rsi), %ecx
-; FALLBACK6-NEXT: movl %ecx, %eax
+; FALLBACK6-NEXT: movzbl (%rsi), %esi
+; FALLBACK6-NEXT: movl %esi, %eax
; FALLBACK6-NEXT: shlb $5, %al
; FALLBACK6-NEXT: xorps %xmm2, %xmm2
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: shlb $2, %cl
-; FALLBACK6-NEXT: andb $24, %cl
-; FALLBACK6-NEXT: negb %cl
-; FALLBACK6-NEXT: movsbq %cl, %rcx
-; FALLBACK6-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK6-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK6-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK6-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK6-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: movl %eax, %ecx
+; FALLBACK6-NEXT: shlb $2, %sil
+; FALLBACK6-NEXT: andb $24, %sil
+; FALLBACK6-NEXT: negb %sil
+; FALLBACK6-NEXT: movsbq %sil, %rsi
+; FALLBACK6-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK6-NEXT: shrq %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK6-NEXT: orq %rdi, %r8
+; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK6-NEXT: shrq %rsi
+; FALLBACK6-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK6-NEXT: orq %r9, %rsi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK6-NEXT: shrq %rdi
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: shrq %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r8, %rcx
-; FALLBACK6-NEXT: shrq %r9
-; FALLBACK6-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, (%rdx)
+; FALLBACK6-NEXT: movq %rcx, (%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK6-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK6-NEXT: movq %r8, 24(%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: shl_32bytes_dwordOff:
@@ -8283,38 +8272,38 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
; FALLBACK10-LABEL: shl_32bytes_dwordOff:
; FALLBACK10: # %bb.0:
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK10-NEXT: movzbl (%rsi), %ecx
-; FALLBACK10-NEXT: movl %ecx, %eax
+; FALLBACK10-NEXT: movzbl (%rsi), %esi
+; FALLBACK10-NEXT: movl %esi, %eax
; FALLBACK10-NEXT: shlb $5, %al
; FALLBACK10-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: shlb $2, %cl
-; FALLBACK10-NEXT: andb $24, %cl
-; FALLBACK10-NEXT: negb %cl
-; FALLBACK10-NEXT: movsbq %cl, %rcx
-; FALLBACK10-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK10-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK10-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK10-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK10-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: shlb $2, %sil
+; FALLBACK10-NEXT: andb $24, %sil
+; FALLBACK10-NEXT: negb %sil
+; FALLBACK10-NEXT: movsbq %sil, %rsi
+; FALLBACK10-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK10-NEXT: shrq %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK10-NEXT: orq %rdi, %r8
+; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK10-NEXT: shrq %rsi
+; FALLBACK10-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK10-NEXT: orq %r9, %rsi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK10-NEXT: shrq %rdi
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: shrq %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r8, %rcx
-; FALLBACK10-NEXT: shrq %r9
-; FALLBACK10-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, (%rdx)
+; FALLBACK10-NEXT: movq %rcx, (%rdx)
; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK10-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK10-NEXT: movq %r8, 24(%rdx)
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -8428,38 +8417,38 @@ define void @shl_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) nou
; FALLBACK14-LABEL: shl_32bytes_dwordOff:
; FALLBACK14: # %bb.0:
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
-; FALLBACK14-NEXT: movzbl (%rsi), %ecx
-; FALLBACK14-NEXT: movl %ecx, %eax
+; FALLBACK14-NEXT: movzbl (%rsi), %esi
+; FALLBACK14-NEXT: movl %esi, %eax
; FALLBACK14-NEXT: shlb $5, %al
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: shlb $2, %cl
-; FALLBACK14-NEXT: andb $24, %cl
-; FALLBACK14-NEXT: negb %cl
-; FALLBACK14-NEXT: movsbq %cl, %rcx
-; FALLBACK14-NEXT: shlxq %rax, -16(%rsp,%rcx), %rsi
-; FALLBACK14-NEXT: movq -24(%rsp,%rcx), %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %r8
-; FALLBACK14-NEXT: movq -40(%rsp,%rcx), %r9
-; FALLBACK14-NEXT: movq -32(%rsp,%rcx), %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %r10
-; FALLBACK14-NEXT: shlxq %rax, %r9, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: movl %eax, %ecx
+; FALLBACK14-NEXT: shlb $2, %sil
+; FALLBACK14-NEXT: andb $24, %sil
+; FALLBACK14-NEXT: negb %sil
+; FALLBACK14-NEXT: movsbq %sil, %rsi
+; FALLBACK14-NEXT: shlxq %rcx, -16(%rsp,%rsi), %rdi
; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: shlxq %rcx, %r8, %r9
+; FALLBACK14-NEXT: shrq %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK14-NEXT: orq %rdi, %r8
+; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %rdi
+; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rsi, %r10
+; FALLBACK14-NEXT: shrq %rsi
+; FALLBACK14-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK14-NEXT: orq %r9, %rsi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %rcx
; FALLBACK14-NEXT: shrq %rdi
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: shrq %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r8, %rcx
-; FALLBACK14-NEXT: shrq %r9
-; FALLBACK14-NEXT: shrxq %rax, %r9, %rax
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %rax
; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, (%rdx)
+; FALLBACK14-NEXT: movq %rcx, (%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 24(%rdx)
+; FALLBACK14-NEXT: movq %rsi, 16(%rdx)
+; FALLBACK14-NEXT: movq %r8, 24(%rdx)
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -8906,30 +8895,30 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $24, %sil
-; FALLBACK2-NEXT: movzbl %sil, %ecx
-; FALLBACK2-NEXT: movq -64(%rsp,%rcx), %rsi
-; FALLBACK2-NEXT: movq -56(%rsp,%rcx), %rdi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
-; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx), %r9
-; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK2-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK2-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movzbl %sil, %esi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %r8
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: shrxq %rcx, -72(%rsp,%rsi), %r9
; FALLBACK2-NEXT: addq %rdi, %rdi
; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: addq %rcx, %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: leaq (%rsi,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK2-NEXT: orq %r8, %rax
+; FALLBACK2-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK2-NEXT: movq %rcx, 24(%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, (%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: ashr_32bytes:
@@ -9067,30 +9056,30 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movl %eax, %ecx
; FALLBACK6-NEXT: andb $24, %sil
-; FALLBACK6-NEXT: movzbl %sil, %ecx
-; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK6-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK6-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK6-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK6-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: movzbl %sil, %esi
+; FALLBACK6-NEXT: shrxq %rcx, -72(%rsp,%rsi), %rdi
; FALLBACK6-NEXT: notb %al
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r9, %rcx
-; FALLBACK6-NEXT: addq %r8, %r8
-; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq -64(%rsp,%rsi), %r8
+; FALLBACK6-NEXT: movq -56(%rsp,%rsi), %r9
+; FALLBACK6-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK6-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK6-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK6-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK6-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %rdi, %r11
+; FALLBACK6-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %rdi, %rax
+; FALLBACK6-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK6-NEXT: movq %rcx, 24(%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, (%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: ashr_32bytes:
@@ -9227,30 +9216,30 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movl %eax, %ecx
; FALLBACK10-NEXT: andb $24, %sil
-; FALLBACK10-NEXT: movzbl %sil, %ecx
-; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK10-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK10-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK10-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK10-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: movzbl %sil, %esi
+; FALLBACK10-NEXT: shrxq %rcx, -72(%rsp,%rsi), %rdi
; FALLBACK10-NEXT: notb %al
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r9, %rcx
-; FALLBACK10-NEXT: addq %r8, %r8
-; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq -64(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: movq -56(%rsp,%rsi), %r9
+; FALLBACK10-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK10-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK10-NEXT: orq %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK10-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK10-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK10-NEXT: orq %rdi, %r11
+; FALLBACK10-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK10-NEXT: orq %rdi, %rax
+; FALLBACK10-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK10-NEXT: movq %rcx, 24(%rdx)
; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, (%rdx)
; FALLBACK10-NEXT: retq
;
; FALLBACK11-LABEL: ashr_32bytes:
@@ -9387,30 +9376,30 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movl %eax, %ecx
; FALLBACK14-NEXT: andb $24, %sil
-; FALLBACK14-NEXT: movzbl %sil, %ecx
-; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx), %rsi
-; FALLBACK14-NEXT: movq -64(%rsp,%rcx), %rdi
-; FALLBACK14-NEXT: movq -56(%rsp,%rcx), %r8
-; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK14-NEXT: movq -48(%rsp,%rcx), %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK14-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: movzbl %sil, %esi
+; FALLBACK14-NEXT: shrxq %rcx, -72(%rsp,%rsi), %rdi
; FALLBACK14-NEXT: notb %al
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r9, %rcx
-; FALLBACK14-NEXT: addq %r8, %r8
-; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq -64(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: movq -56(%rsp,%rsi), %r9
+; FALLBACK14-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK14-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK14-NEXT: orq %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK14-NEXT: movq -48(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK14-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK14-NEXT: orq %rdi, %r11
+; FALLBACK14-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK14-NEXT: orq %rdi, %rax
+; FALLBACK14-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK14-NEXT: movq %rcx, 24(%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, (%rdx)
; FALLBACK14-NEXT: retq
;
; FALLBACK15-LABEL: ashr_32bytes:
@@ -9671,7 +9660,7 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: pushl %edi
; FALLBACK18-NEXT: pushl %esi
; FALLBACK18-NEXT: subl $108, %esp
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edx
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %esi
; FALLBACK18-NEXT: movl (%esi), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -9680,22 +9669,22 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl 8(%esi), %ebx
; FALLBACK18-NEXT: movl 12(%esi), %ebp
; FALLBACK18-NEXT: movl 16(%esi), %edi
-; FALLBACK18-NEXT: movzbl (%ecx), %ecx
-; FALLBACK18-NEXT: movl 20(%esi), %edx
+; FALLBACK18-NEXT: movzbl (%edx), %edx
+; FALLBACK18-NEXT: movl 20(%esi), %ecx
; FALLBACK18-NEXT: movl 24(%esi), %eax
; FALLBACK18-NEXT: movl 28(%esi), %esi
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %ecx, %eax
-; FALLBACK18-NEXT: shlb $3, %al
+; FALLBACK18-NEXT: movl %edx, %ecx
+; FALLBACK18-NEXT: shlb $3, %cl
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ebx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: sarl $31, %esi
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
@@ -9705,66 +9694,65 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: andb $28, %cl
-; FALLBACK18-NEXT: movzbl %cl, %edi
-; FALLBACK18-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK18-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK18-NEXT: shrxl %eax, %esi, %ebx
-; FALLBACK18-NEXT: movl %eax, %edx
-; FALLBACK18-NEXT: notb %dl
-; FALLBACK18-NEXT: leal (%ecx,%ecx), %ebp
-; FALLBACK18-NEXT: shlxl %edx, %ebp, %ebp
-; FALLBACK18-NEXT: orl %ebx, %ebp
-; FALLBACK18-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %eax, 32(%esp,%edi), %ebx
-; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK18-NEXT: orl %ebx, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 48(%esp,%edi), %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK18-NEXT: shlxl %edx, %ebx, %esi
-; FALLBACK18-NEXT: movl 44(%esp,%edi), %ebp
-; FALLBACK18-NEXT: shrxl %eax, %ebp, %ebx
-; FALLBACK18-NEXT: orl %ebx, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %eax, %ecx, %ecx
-; FALLBACK18-NEXT: movl %eax, %ebx
-; FALLBACK18-NEXT: addl %ebp, %ebp
-; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl %ecx, %eax
+; FALLBACK18-NEXT: andb $28, %dl
+; FALLBACK18-NEXT: movzbl %dl, %esi
+; FALLBACK18-NEXT: movl 36(%esp,%esi), %edx
+; FALLBACK18-NEXT: movl 40(%esp,%esi), %ebp
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edi
+; FALLBACK18-NEXT: notb %cl
+; FALLBACK18-NEXT: leal (%ebp,%ebp), %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ebx
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, 32(%esp,%esi), %edi
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK18-NEXT: orl %edi, %edx
+; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 48(%esp,%esi), %edx
+; FALLBACK18-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %ebx
+; FALLBACK18-NEXT: movl 44(%esp,%esi), %edx
+; FALLBACK18-NEXT: shrxl %eax, %edx, %edi
+; FALLBACK18-NEXT: orl %edi, %ebx
+; FALLBACK18-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrxl %eax, %ebp, %edi
+; FALLBACK18-NEXT: movl %eax, %ebp
+; FALLBACK18-NEXT: addl %edx, %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 56(%esp,%edi), %ebp
-; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK18-NEXT: movl 52(%esp,%edi), %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %esi
-; FALLBACK18-NEXT: orl %esi, %ecx
-; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 56(%esp,%esi), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %edx
+; FALLBACK18-NEXT: shlxl %ecx, %edx, %edx
+; FALLBACK18-NEXT: movl 52(%esp,%esi), %eax
+; FALLBACK18-NEXT: shrxl %ebp, %eax, %ebx
+; FALLBACK18-NEXT: orl %ebx, %edx
+; FALLBACK18-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %eax, %eax
-; FALLBACK18-NEXT: shlxl %edx, %eax, %esi
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrxl %ebx, %ebp, %eax
-; FALLBACK18-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK18-NEXT: sarxl %ebx, %edi, %ebx
-; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %edx, %edi, %edx
-; FALLBACK18-NEXT: orl %eax, %edx
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl %ebx, 28(%eax)
-; FALLBACK18-NEXT: movl %edx, 24(%eax)
-; FALLBACK18-NEXT: movl %esi, 16(%eax)
-; FALLBACK18-NEXT: movl %ecx, 20(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 8(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 12(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, (%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: shlxl %ecx, %eax, %eax
+; FALLBACK18-NEXT: orl %ebx, %eax
+; FALLBACK18-NEXT: movl 60(%esp,%esi), %esi
+; FALLBACK18-NEXT: leal (%esi,%esi), %ebx
+; FALLBACK18-NEXT: shlxl %ecx, %ebx, %ecx
+; FALLBACK18-NEXT: shrxl %ebp, %edi, %edi
+; FALLBACK18-NEXT: orl %edi, %ecx
+; FALLBACK18-NEXT: sarxl %ebp, %esi, %esi
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edi
+; FALLBACK18-NEXT: movl %esi, 28(%edi)
+; FALLBACK18-NEXT: movl %ecx, 24(%edi)
+; FALLBACK18-NEXT: movl %eax, 16(%edi)
+; FALLBACK18-NEXT: movl %edx, 20(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 8(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 12(%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, (%edi)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 4(%edi)
; FALLBACK18-NEXT: addl $108, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -10070,82 +10058,82 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movups (%ecx), %xmm0
; FALLBACK22-NEXT: movl 16(%ecx), %esi
; FALLBACK22-NEXT: movl 20(%ecx), %edi
-; FALLBACK22-NEXT: movl 24(%ecx), %ebx
-; FALLBACK22-NEXT: movl 28(%ecx), %edx
-; FALLBACK22-NEXT: movzbl (%eax), %ecx
-; FALLBACK22-NEXT: movl %ecx, %eax
-; FALLBACK22-NEXT: shlb $3, %al
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl 24(%ecx), %ebp
+; FALLBACK22-NEXT: movl 28(%ecx), %ecx
+; FALLBACK22-NEXT: movzbl (%eax), %edx
+; FALLBACK22-NEXT: movl %edx, %ebx
+; FALLBACK22-NEXT: shlb $3, %bl
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: sarl $31, %edx
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: andb $28, %cl
-; FALLBACK22-NEXT: movzbl %cl, %edi
-; FALLBACK22-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %eax, %edx
-; FALLBACK22-NEXT: notb %dl
-; FALLBACK22-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK22-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK22-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK22-NEXT: orl %ebx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK22-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK22-NEXT: movl %eax, %ecx
-; FALLBACK22-NEXT: orl %ebx, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK22-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK22-NEXT: shlxl %edx, %ebx, %eax
-; FALLBACK22-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK22-NEXT: shrxl %ecx, %ebx, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: sarl $31, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK22-NEXT: movl %ebx, %eax
+; FALLBACK22-NEXT: andb $28, %dl
+; FALLBACK22-NEXT: movzbl %dl, %ecx
+; FALLBACK22-NEXT: shrxl %eax, 32(%esp,%ecx), %edx
+; FALLBACK22-NEXT: movl %eax, %ebp
+; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: movl 36(%esp,%ecx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl %ecx, %eax
-; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK22-NEXT: addl %ebx, %ebx
-; FALLBACK22-NEXT: shlxl %edx, %ebx, %ebx
-; FALLBACK22-NEXT: orl %ebp, %ebx
-; FALLBACK22-NEXT: shrxl %ecx, %esi, %ecx
-; FALLBACK22-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK22-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK22-NEXT: sarxl %eax, %edi, %eax
-; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK22-NEXT: orl %ecx, %edi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: addl %ecx, %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %esi, %ecx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK22-NEXT: movl %eax, 28(%edx)
-; FALLBACK22-NEXT: movl %ecx, 4(%edx)
-; FALLBACK22-NEXT: movl %edi, 24(%edx)
-; FALLBACK22-NEXT: movl %ebx, 16(%edx)
+; FALLBACK22-NEXT: leal (%eax,%eax), %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: orl %edx, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 48(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: leal (%eax,%eax), %edx
+; FALLBACK22-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK22-NEXT: movl 44(%esp,%ecx), %edx
+; FALLBACK22-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %edx, %edx
+; FALLBACK22-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK22-NEXT: movl 40(%esp,%ecx), %edx
+; FALLBACK22-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK22-NEXT: movl %ebp, %edx
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 56(%esp,%ecx), %esi
+; FALLBACK22-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK22-NEXT: movl 52(%esp,%ecx), %eax
+; FALLBACK22-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK22-NEXT: orl %edi, %ebp
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK22-NEXT: movl 60(%esp,%ecx), %ecx
+; FALLBACK22-NEXT: leal (%ecx,%ecx), %esi
+; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK22-NEXT: orl %eax, %esi
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: movl %eax, 20(%edx)
+; FALLBACK22-NEXT: addl %eax, %eax
+; FALLBACK22-NEXT: shlxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: movl %edx, %ebx
+; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; FALLBACK22-NEXT: orl %edx, %eax
+; FALLBACK22-NEXT: sarxl %ebx, %ecx, %ecx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %ecx, 28(%edx)
+; FALLBACK22-NEXT: movl %eax, 4(%edx)
+; FALLBACK22-NEXT: movl %esi, 24(%edx)
+; FALLBACK22-NEXT: movl %edi, 16(%edx)
+; FALLBACK22-NEXT: movl %ebp, 20(%edx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK22-NEXT: movl %eax, 8(%edx)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -10446,82 +10434,82 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: vmovups (%ecx), %xmm0
; FALLBACK26-NEXT: movl 16(%ecx), %esi
; FALLBACK26-NEXT: movl 20(%ecx), %edi
-; FALLBACK26-NEXT: movl 24(%ecx), %ebx
-; FALLBACK26-NEXT: movl 28(%ecx), %edx
-; FALLBACK26-NEXT: movzbl (%eax), %ecx
-; FALLBACK26-NEXT: movl %ecx, %eax
-; FALLBACK26-NEXT: shlb $3, %al
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl 24(%ecx), %ebp
+; FALLBACK26-NEXT: movl 28(%ecx), %ecx
+; FALLBACK26-NEXT: movzbl (%eax), %edx
+; FALLBACK26-NEXT: movl %edx, %ebx
+; FALLBACK26-NEXT: shlb $3, %bl
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: sarl $31, %edx
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: andb $28, %cl
-; FALLBACK26-NEXT: movzbl %cl, %edi
-; FALLBACK26-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %eax, %edx
-; FALLBACK26-NEXT: notb %dl
-; FALLBACK26-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK26-NEXT: orl %ecx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK26-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK26-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK26-NEXT: orl %ebx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK26-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK26-NEXT: movl %eax, %ecx
-; FALLBACK26-NEXT: orl %ebx, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK26-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK26-NEXT: shlxl %edx, %ebx, %eax
-; FALLBACK26-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK26-NEXT: shrxl %ecx, %ebx, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: sarl $31, %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK26-NEXT: movl %ebx, %eax
+; FALLBACK26-NEXT: andb $28, %dl
+; FALLBACK26-NEXT: movzbl %dl, %ecx
+; FALLBACK26-NEXT: shrxl %eax, 32(%esp,%ecx), %edx
+; FALLBACK26-NEXT: movl %eax, %ebp
+; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: movl 36(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl %ecx, %eax
-; FALLBACK26-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK26-NEXT: addl %ebx, %ebx
-; FALLBACK26-NEXT: shlxl %edx, %ebx, %ebx
-; FALLBACK26-NEXT: orl %ebp, %ebx
-; FALLBACK26-NEXT: shrxl %ecx, %esi, %ecx
-; FALLBACK26-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK26-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK26-NEXT: sarxl %eax, %edi, %eax
-; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK26-NEXT: orl %ecx, %edi
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %esi, %ecx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK26-NEXT: movl %eax, 28(%edx)
-; FALLBACK26-NEXT: movl %ecx, 4(%edx)
-; FALLBACK26-NEXT: movl %edi, 24(%edx)
-; FALLBACK26-NEXT: movl %ebx, 16(%edx)
+; FALLBACK26-NEXT: leal (%eax,%eax), %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: orl %edx, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 48(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: leal (%eax,%eax), %edx
+; FALLBACK26-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK26-NEXT: movl 44(%esp,%ecx), %edx
+; FALLBACK26-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %edx, %edx
+; FALLBACK26-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK26-NEXT: movl 40(%esp,%ecx), %edx
+; FALLBACK26-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK26-NEXT: movl %ebp, %edx
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 56(%esp,%ecx), %esi
+; FALLBACK26-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK26-NEXT: movl 52(%esp,%ecx), %eax
+; FALLBACK26-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK26-NEXT: orl %edi, %ebp
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK26-NEXT: movl 60(%esp,%ecx), %ecx
+; FALLBACK26-NEXT: leal (%ecx,%ecx), %esi
+; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: orl %eax, %esi
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 20(%edx)
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: movl %edx, %ebx
+; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; FALLBACK26-NEXT: orl %edx, %eax
+; FALLBACK26-NEXT: sarxl %ebx, %ecx, %ecx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %ecx, 28(%edx)
+; FALLBACK26-NEXT: movl %eax, 4(%edx)
+; FALLBACK26-NEXT: movl %esi, 24(%edx)
+; FALLBACK26-NEXT: movl %edi, 16(%edx)
+; FALLBACK26-NEXT: movl %ebp, 20(%edx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK26-NEXT: movl %eax, 8(%edx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -10822,82 +10810,82 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: vmovups (%ecx), %xmm0
; FALLBACK30-NEXT: movl 16(%ecx), %esi
; FALLBACK30-NEXT: movl 20(%ecx), %edi
-; FALLBACK30-NEXT: movl 24(%ecx), %ebx
-; FALLBACK30-NEXT: movl 28(%ecx), %edx
-; FALLBACK30-NEXT: movzbl (%eax), %ecx
-; FALLBACK30-NEXT: movl %ecx, %eax
-; FALLBACK30-NEXT: shlb $3, %al
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %ebx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl 24(%ecx), %ebp
+; FALLBACK30-NEXT: movl 28(%ecx), %ecx
+; FALLBACK30-NEXT: movzbl (%eax), %edx
+; FALLBACK30-NEXT: movl %edx, %ebx
+; FALLBACK30-NEXT: shlb $3, %bl
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: movl %edi, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: sarl $31, %edx
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: andb $28, %cl
-; FALLBACK30-NEXT: movzbl %cl, %edi
-; FALLBACK30-NEXT: shrxl %eax, 32(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %eax, %edx
-; FALLBACK30-NEXT: notb %dl
-; FALLBACK30-NEXT: movl 36(%esp,%edi), %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK30-NEXT: orl %ecx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 48(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK30-NEXT: movl 44(%esp,%edi), %ecx
-; FALLBACK30-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK30-NEXT: orl %ebx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %esi
-; FALLBACK30-NEXT: movl 40(%esp,%edi), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %eax, %ecx, %ebx
-; FALLBACK30-NEXT: movl %eax, %ecx
-; FALLBACK30-NEXT: orl %ebx, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 56(%esp,%edi), %esi
-; FALLBACK30-NEXT: leal (%esi,%esi), %ebx
-; FALLBACK30-NEXT: shlxl %edx, %ebx, %eax
-; FALLBACK30-NEXT: movl 52(%esp,%edi), %ebx
-; FALLBACK30-NEXT: shrxl %ecx, %ebx, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: sarl $31, %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK30-NEXT: movl %ebx, %eax
+; FALLBACK30-NEXT: andb $28, %dl
+; FALLBACK30-NEXT: movzbl %dl, %ecx
+; FALLBACK30-NEXT: shrxl %eax, 32(%esp,%ecx), %edx
+; FALLBACK30-NEXT: movl %eax, %ebp
+; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: movl 36(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl %ecx, %eax
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; FALLBACK30-NEXT: addl %ebx, %ebx
-; FALLBACK30-NEXT: shlxl %edx, %ebx, %ebx
-; FALLBACK30-NEXT: orl %ebp, %ebx
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %ecx
-; FALLBACK30-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; FALLBACK30-NEXT: movl 60(%esp,%edi), %edi
-; FALLBACK30-NEXT: sarxl %eax, %edi, %eax
-; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
-; FALLBACK30-NEXT: orl %ecx, %edi
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: addl %ecx, %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %esi, %ecx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
-; FALLBACK30-NEXT: movl %eax, 28(%edx)
-; FALLBACK30-NEXT: movl %ecx, 4(%edx)
-; FALLBACK30-NEXT: movl %edi, 24(%edx)
-; FALLBACK30-NEXT: movl %ebx, 16(%edx)
+; FALLBACK30-NEXT: leal (%eax,%eax), %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: orl %edx, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 48(%esp,%ecx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: leal (%eax,%eax), %edx
+; FALLBACK30-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK30-NEXT: movl 44(%esp,%ecx), %edx
+; FALLBACK30-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %edx, %edx
+; FALLBACK30-NEXT: shlxl %ebx, %edx, %edi
+; FALLBACK30-NEXT: movl 40(%esp,%ecx), %edx
+; FALLBACK30-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ebp, %edx, %esi
+; FALLBACK30-NEXT: movl %ebp, %edx
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 56(%esp,%ecx), %esi
+; FALLBACK30-NEXT: leal (%esi,%esi), %ebp
+; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK30-NEXT: movl 52(%esp,%ecx), %eax
+; FALLBACK30-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK30-NEXT: orl %edi, %ebp
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ebx, %eax, %edi
+; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK30-NEXT: movl 60(%esp,%ecx), %ecx
+; FALLBACK30-NEXT: leal (%ecx,%ecx), %esi
+; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: orl %eax, %esi
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 20(%edx)
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: movl %edx, %ebx
+; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; FALLBACK30-NEXT: orl %edx, %eax
+; FALLBACK30-NEXT: sarxl %ebx, %ecx, %ecx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %ecx, 28(%edx)
+; FALLBACK30-NEXT: movl %eax, 4(%edx)
+; FALLBACK30-NEXT: movl %esi, 24(%edx)
+; FALLBACK30-NEXT: movl %edi, 16(%edx)
+; FALLBACK30-NEXT: movl %ebp, 20(%edx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK30-NEXT: movl %eax, 8(%edx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -11104,30 +11092,30 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andb $6, %sil
-; FALLBACK2-NEXT: movzbl %sil, %ecx
-; FALLBACK2-NEXT: movq -64(%rsp,%rcx,4), %rsi
-; FALLBACK2-NEXT: movq -56(%rsp,%rcx,4), %rdi
-; FALLBACK2-NEXT: shrxq %rax, %rsi, %r8
-; FALLBACK2-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %r9
-; FALLBACK2-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK2-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK2-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK2-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK2-NEXT: movzbl %sil, %esi
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi,4), %rdi
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi,4), %r8
+; FALLBACK2-NEXT: shrxq %rcx, %rdi, %r9
; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK2-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK2-NEXT: orq %r9, %r10
+; FALLBACK2-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %r9
; FALLBACK2-NEXT: addq %rdi, %rdi
; FALLBACK2-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r8, %rdi
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r9, %rsi
-; FALLBACK2-NEXT: addq %rcx, %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %rax
-; FALLBACK2-NEXT: orq %r10, %rax
-; FALLBACK2-NEXT: movq %r11, 24(%rdx)
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK2-NEXT: leaq (%rsi,%rsi), %r9
+; FALLBACK2-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK2-NEXT: orq %r8, %rax
+; FALLBACK2-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK2-NEXT: movq %rcx, 24(%rdx)
; FALLBACK2-NEXT: movq %rax, 16(%rdx)
-; FALLBACK2-NEXT: movq %rsi, (%rdx)
-; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
+; FALLBACK2-NEXT: movq %rdi, (%rdx)
+; FALLBACK2-NEXT: movq %r10, 8(%rdx)
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: ashr_32bytes_dwordOff:
@@ -11268,30 +11256,30 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK6-NEXT: movl %eax, %ecx
; FALLBACK6-NEXT: andb $6, %sil
-; FALLBACK6-NEXT: movzbl %sil, %ecx
-; FALLBACK6-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK6-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK6-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK6-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK6-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK6-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK6-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK6-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK6-NEXT: movzbl %sil, %esi
+; FALLBACK6-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %rdi
; FALLBACK6-NEXT: notb %al
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK6-NEXT: orq %rsi, %rdi
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK6-NEXT: orq %r9, %rcx
-; FALLBACK6-NEXT: addq %r8, %r8
-; FALLBACK6-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK6-NEXT: orq %r10, %rax
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
+; FALLBACK6-NEXT: movq -64(%rsp,%rsi,4), %r8
+; FALLBACK6-NEXT: movq -56(%rsp,%rsi,4), %r9
+; FALLBACK6-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK6-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %rdi, %r10
+; FALLBACK6-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK6-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK6-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK6-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %rdi, %r11
+; FALLBACK6-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK6-NEXT: orq %rdi, %rax
+; FALLBACK6-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK6-NEXT: movq %rcx, 24(%rdx)
; FALLBACK6-NEXT: movq %rax, 8(%rdx)
-; FALLBACK6-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK6-NEXT: movq %rdi, (%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, (%rdx)
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: ashr_32bytes_dwordOff:
@@ -11431,30 +11419,30 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK10-NEXT: movl %eax, %ecx
; FALLBACK10-NEXT: andb $6, %sil
-; FALLBACK10-NEXT: movzbl %sil, %ecx
-; FALLBACK10-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK10-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK10-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK10-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK10-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK10-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK10-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK10-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK10-NEXT: movzbl %sil, %esi
+; FALLBACK10-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %rdi
; FALLBACK10-NEXT: notb %al
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK10-NEXT: orq %rsi, %rdi
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK10-NEXT: orq %r9, %rcx
-; FALLBACK10-NEXT: addq %r8, %r8
-; FALLBACK10-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK10-NEXT: orq %r10, %rax
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
+; FALLBACK10-NEXT: movq -64(%rsp,%rsi,4), %r8
+; FALLBACK10-NEXT: movq -56(%rsp,%rsi,4), %r9
+; FALLBACK10-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK10-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK10-NEXT: orq %rdi, %r10
+; FALLBACK10-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK10-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK10-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK10-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK10-NEXT: orq %rdi, %r11
+; FALLBACK10-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK10-NEXT: orq %rdi, %rax
+; FALLBACK10-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK10-NEXT: movq %rcx, 24(%rdx)
; FALLBACK10-NEXT: movq %rax, 8(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK10-NEXT: movq %rdi, (%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, (%rdx)
; FALLBACK10-NEXT: retq
;
; FALLBACK11-LABEL: ashr_32bytes_dwordOff:
@@ -11594,30 +11582,30 @@ define void @ashr_32bytes_dwordOff(ptr %src.ptr, ptr %dwordOff.ptr, ptr %dst) no
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
+; FALLBACK14-NEXT: movl %eax, %ecx
; FALLBACK14-NEXT: andb $6, %sil
-; FALLBACK14-NEXT: movzbl %sil, %ecx
-; FALLBACK14-NEXT: shrxq %rax, -72(%rsp,%rcx,4), %rsi
-; FALLBACK14-NEXT: movq -64(%rsp,%rcx,4), %rdi
-; FALLBACK14-NEXT: movq -56(%rsp,%rcx,4), %r8
-; FALLBACK14-NEXT: shrxq %rax, %r8, %r9
-; FALLBACK14-NEXT: movq -48(%rsp,%rcx,4), %rcx
-; FALLBACK14-NEXT: shrxq %rax, %rdi, %r10
-; FALLBACK14-NEXT: sarxq %rax, %rcx, %r11
-; FALLBACK14-NEXT: # kill: def $al killed $al killed $rax def $rax
+; FALLBACK14-NEXT: movzbl %sil, %esi
+; FALLBACK14-NEXT: shrxq %rcx, -72(%rsp,%rsi,4), %rdi
; FALLBACK14-NEXT: notb %al
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
-; FALLBACK14-NEXT: orq %rsi, %rdi
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rax, %rcx, %rcx
-; FALLBACK14-NEXT: orq %r9, %rcx
-; FALLBACK14-NEXT: addq %r8, %r8
-; FALLBACK14-NEXT: shlxq %rax, %r8, %rax
-; FALLBACK14-NEXT: orq %r10, %rax
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
+; FALLBACK14-NEXT: movq -64(%rsp,%rsi,4), %r8
+; FALLBACK14-NEXT: movq -56(%rsp,%rsi,4), %r9
+; FALLBACK14-NEXT: leaq (%r8,%r8), %r10
+; FALLBACK14-NEXT: shlxq %rax, %r10, %r10
+; FALLBACK14-NEXT: orq %rdi, %r10
+; FALLBACK14-NEXT: shrxq %rcx, %r9, %rdi
+; FALLBACK14-NEXT: movq -48(%rsp,%rsi,4), %rsi
+; FALLBACK14-NEXT: leaq (%rsi,%rsi), %r11
+; FALLBACK14-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK14-NEXT: orq %rdi, %r11
+; FALLBACK14-NEXT: shrxq %rcx, %r8, %rdi
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK14-NEXT: orq %rdi, %rax
+; FALLBACK14-NEXT: sarxq %rcx, %rsi, %rcx
+; FALLBACK14-NEXT: movq %rcx, 24(%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK14-NEXT: movq %rdi, (%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, (%rdx)
; FALLBACK14-NEXT: retq
;
; FALLBACK15-LABEL: ashr_32bytes_dwordOff:
@@ -12204,10 +12192,8 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK2-LABEL: lshr_64bytes:
; FALLBACK2: # %bb.0:
-; FALLBACK2-NEXT: pushq %rbp
; FALLBACK2-NEXT: pushq %r15
; FALLBACK2-NEXT: pushq %r14
-; FALLBACK2-NEXT: pushq %r13
; FALLBACK2-NEXT: pushq %r12
; FALLBACK2-NEXT: pushq %rbx
; FALLBACK2-NEXT: pushq %rax
@@ -12235,60 +12221,58 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: leal (,%rax,8), %ecx
; FALLBACK2-NEXT: andl $56, %ecx
+; FALLBACK2-NEXT: movl %ecx, %esi
; FALLBACK2-NEXT: andl $56, %eax
-; FALLBACK2-NEXT: movq -120(%rsp,%rax), %rdi
-; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r9
-; FALLBACK2-NEXT: shrxq %rcx, %rdi, %rbx
-; FALLBACK2-NEXT: shrxq %rcx, -128(%rsp,%rax), %r13
-; FALLBACK2-NEXT: movq -104(%rsp,%rax), %rsi
-; FALLBACK2-NEXT: shrxq %rcx, %rsi, %r8
-; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r10
-; FALLBACK2-NEXT: shrxq %rcx, %r9, %r11
-; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r14
-; FALLBACK2-NEXT: shrxq %rcx, %r14, %r15
-; FALLBACK2-NEXT: shrxq %rcx, %r10, %rbp
-; FALLBACK2-NEXT: movl %ecx, %r12d
-; FALLBACK2-NEXT: notb %r12b
-; FALLBACK2-NEXT: addq %r9, %r9
-; FALLBACK2-NEXT: shlxq %r12, %r9, %r9
+; FALLBACK2-NEXT: movq -120(%rsp,%rax), %r8
+; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r8, %r9
+; FALLBACK2-NEXT: notb %cl
+; FALLBACK2-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rsi, -128(%rsp,%rax), %r9
+; FALLBACK2-NEXT: addq %r8, %r8
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: orq %r9, %r8
+; FALLBACK2-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK2-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK2-NEXT: leaq (%r14,%r14), %r9
+; FALLBACK2-NEXT: shlxq %rcx, %r9, %r9
; FALLBACK2-NEXT: orq %rbx, %r9
-; FALLBACK2-NEXT: addq %rdi, %rdi
-; FALLBACK2-NEXT: shlxq %r12, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r13, %rdi
-; FALLBACK2-NEXT: movq -80(%rsp,%rax), %rbx
-; FALLBACK2-NEXT: shrxq %rcx, %rbx, %r13
-; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK2-NEXT: shrxq %rcx, %rax, %rcx
+; FALLBACK2-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK2-NEXT: addq %r11, %r11
+; FALLBACK2-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK2-NEXT: orq %r10, %r11
+; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r10, %rbx
+; FALLBACK2-NEXT: movq -80(%rsp,%rax), %r15
+; FALLBACK2-NEXT: leaq (%r15,%r15), %r12
+; FALLBACK2-NEXT: shlxq %rcx, %r12, %r12
+; FALLBACK2-NEXT: orq %rbx, %r12
+; FALLBACK2-NEXT: shrxq %rsi, %r14, %rbx
; FALLBACK2-NEXT: addq %r10, %r10
-; FALLBACK2-NEXT: shlxq %r12, %r10, %r10
-; FALLBACK2-NEXT: orq %r8, %r10
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %r12, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r11, %rsi
-; FALLBACK2-NEXT: leaq (%rbx,%rbx), %r8
-; FALLBACK2-NEXT: shlxq %r12, %r8, %r8
-; FALLBACK2-NEXT: orq %r15, %r8
-; FALLBACK2-NEXT: addq %r14, %r14
-; FALLBACK2-NEXT: shlxq %r12, %r14, %r11
-; FALLBACK2-NEXT: orq %rbp, %r11
-; FALLBACK2-NEXT: addq %rax, %rax
-; FALLBACK2-NEXT: shlxq %r12, %rax, %rax
-; FALLBACK2-NEXT: orq %r13, %rax
-; FALLBACK2-NEXT: movq %rcx, 56(%rdx)
-; FALLBACK2-NEXT: movq %rax, 48(%rdx)
-; FALLBACK2-NEXT: movq %r11, 32(%rdx)
-; FALLBACK2-NEXT: movq %r8, 40(%rdx)
-; FALLBACK2-NEXT: movq %rsi, 16(%rdx)
-; FALLBACK2-NEXT: movq %r10, 24(%rdx)
-; FALLBACK2-NEXT: movq %rdi, (%rdx)
-; FALLBACK2-NEXT: movq %r9, 8(%rdx)
+; FALLBACK2-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK2-NEXT: orq %rbx, %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r15, %rbx
+; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK2-NEXT: leaq (%rax,%rax), %r14
+; FALLBACK2-NEXT: shlxq %rcx, %r14, %rcx
+; FALLBACK2-NEXT: orq %rbx, %rcx
+; FALLBACK2-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK2-NEXT: movq %rax, 56(%rdx)
+; FALLBACK2-NEXT: movq %rcx, 48(%rdx)
+; FALLBACK2-NEXT: movq %r10, 32(%rdx)
+; FALLBACK2-NEXT: movq %r12, 40(%rdx)
+; FALLBACK2-NEXT: movq %r11, 16(%rdx)
+; FALLBACK2-NEXT: movq %r9, 24(%rdx)
+; FALLBACK2-NEXT: movq %r8, (%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
; FALLBACK2-NEXT: addq $8, %rsp
; FALLBACK2-NEXT: popq %rbx
; FALLBACK2-NEXT: popq %r12
-; FALLBACK2-NEXT: popq %r13
; FALLBACK2-NEXT: popq %r14
; FALLBACK2-NEXT: popq %r15
-; FALLBACK2-NEXT: popq %rbp
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: lshr_64bytes:
@@ -12512,13 +12496,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK6-LABEL: lshr_64bytes:
; FALLBACK6: # %bb.0:
-; FALLBACK6-NEXT: pushq %rbp
; FALLBACK6-NEXT: pushq %r15
; FALLBACK6-NEXT: pushq %r14
; FALLBACK6-NEXT: pushq %r13
; FALLBACK6-NEXT: pushq %r12
; FALLBACK6-NEXT: pushq %rbx
-; FALLBACK6-NEXT: pushq %rax
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
@@ -12533,62 +12515,60 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: leal (,%rax,8), %esi
-; FALLBACK6-NEXT: andl $56, %esi
+; FALLBACK6-NEXT: leal (,%rax,8), %ecx
+; FALLBACK6-NEXT: andl $56, %ecx
+; FALLBACK6-NEXT: movl %ecx, %esi
; FALLBACK6-NEXT: andl $56, %eax
-; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK6-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK6-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK6-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK6-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK6-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK6-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK6-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK6-NEXT: movl %esi, %ebx
-; FALLBACK6-NEXT: notb %bl
-; FALLBACK6-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK6-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK6-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK6-NEXT: orq %r11, %r8
-; FALLBACK6-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK6-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK6-NEXT: orq %r12, %r11
+; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r8
+; FALLBACK6-NEXT: notb %cl
+; FALLBACK6-NEXT: movq -120(%rsp,%rax), %r10
+; FALLBACK6-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK6-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK6-NEXT: orq %r8, %rdi
+; FALLBACK6-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK6-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK6-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK6-NEXT: orq %rbx, %r8
+; FALLBACK6-NEXT: shrxq %rsi, %r9, %rbx
+; FALLBACK6-NEXT: addq %r11, %r11
+; FALLBACK6-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK6-NEXT: orq %rbx, %r11
+; FALLBACK6-NEXT: movq -88(%rsp,%rax), %rbx
+; FALLBACK6-NEXT: shrxq %rsi, %rbx, %r15
; FALLBACK6-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK6-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK6-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK6-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK6-NEXT: shlxq %rcx, %r13, %r13
+; FALLBACK6-NEXT: orq %r15, %r13
+; FALLBACK6-NEXT: shrxq %rsi, %r14, %r14
+; FALLBACK6-NEXT: addq %rbx, %rbx
+; FALLBACK6-NEXT: shlxq %rcx, %rbx, %rbx
+; FALLBACK6-NEXT: orq %r14, %rbx
+; FALLBACK6-NEXT: shrxq %rsi, %r12, %r14
; FALLBACK6-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK6-NEXT: shrxq %rsi, %rax, %rsi
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK6-NEXT: orq %r9, %rdi
-; FALLBACK6-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK6-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK6-NEXT: orq %r14, %r9
-; FALLBACK6-NEXT: addq %r10, %r10
-; FALLBACK6-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK6-NEXT: orq %r15, %r10
-; FALLBACK6-NEXT: addq %rax, %rax
-; FALLBACK6-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK6-NEXT: orq %r13, %rax
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK6-NEXT: orq %rbp, %rcx
-; FALLBACK6-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK6-NEXT: leaq (%rax,%rax), %r15
+; FALLBACK6-NEXT: shlxq %rcx, %r15, %r15
+; FALLBACK6-NEXT: orq %r14, %r15
+; FALLBACK6-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK6-NEXT: orq %r10, %rcx
+; FALLBACK6-NEXT: shrxq %rsi, %rax, %rax
+; FALLBACK6-NEXT: movq %rax, 56(%rdx)
; FALLBACK6-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK6-NEXT: movq %rax, 48(%rdx)
-; FALLBACK6-NEXT: movq %r10, 32(%rdx)
-; FALLBACK6-NEXT: movq %r9, 40(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
-; FALLBACK6-NEXT: movq %r8, (%rdx)
-; FALLBACK6-NEXT: addq $8, %rsp
+; FALLBACK6-NEXT: movq %r15, 48(%rdx)
+; FALLBACK6-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK6-NEXT: movq %r13, 40(%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r8, 24(%rdx)
+; FALLBACK6-NEXT: movq %rdi, (%rdx)
; FALLBACK6-NEXT: popq %rbx
; FALLBACK6-NEXT: popq %r12
; FALLBACK6-NEXT: popq %r13
; FALLBACK6-NEXT: popq %r14
; FALLBACK6-NEXT: popq %r15
-; FALLBACK6-NEXT: popq %rbp
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: lshr_64bytes:
@@ -12749,43 +12729,43 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK9-NEXT: pushq %rbx
; FALLBACK9-NEXT: vmovups (%rdi), %ymm0
; FALLBACK9-NEXT: vmovups 32(%rdi), %ymm1
-; FALLBACK9-NEXT: movl (%rsi), %eax
+; FALLBACK9-NEXT: movl (%rsi), %edi
; FALLBACK9-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FALLBACK9-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK9-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK9-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK9-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK9-NEXT: leal (,%rax,8), %ecx
+; FALLBACK9-NEXT: leal (,%rdi,8), %ecx
; FALLBACK9-NEXT: andl $56, %ecx
-; FALLBACK9-NEXT: andl $56, %eax
-; FALLBACK9-NEXT: movq -96(%rsp,%rax), %rdi
-; FALLBACK9-NEXT: movq -104(%rsp,%rax), %r9
-; FALLBACK9-NEXT: movq %r9, %rsi
-; FALLBACK9-NEXT: shrdq %cl, %rdi, %rsi
-; FALLBACK9-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK9-NEXT: andl $56, %edi
+; FALLBACK9-NEXT: movq -96(%rsp,%rdi), %rsi
+; FALLBACK9-NEXT: movq -104(%rsp,%rdi), %r9
+; FALLBACK9-NEXT: movq %r9, %rax
+; FALLBACK9-NEXT: shrdq %cl, %rsi, %rax
+; FALLBACK9-NEXT: movq -112(%rsp,%rdi), %r10
; FALLBACK9-NEXT: movq %r10, %r8
; FALLBACK9-NEXT: shrdq %cl, %r9, %r8
-; FALLBACK9-NEXT: movq -80(%rsp,%rax), %r9
-; FALLBACK9-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK9-NEXT: movq -80(%rsp,%rdi), %r9
+; FALLBACK9-NEXT: movq -88(%rsp,%rdi), %r11
; FALLBACK9-NEXT: movq %r11, %rbx
; FALLBACK9-NEXT: shrdq %cl, %r9, %rbx
-; FALLBACK9-NEXT: shrdq %cl, %r11, %rdi
-; FALLBACK9-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK9-NEXT: shrdq %cl, %r11, %rsi
+; FALLBACK9-NEXT: movq -72(%rsp,%rdi), %r11
; FALLBACK9-NEXT: shrdq %cl, %r11, %r9
-; FALLBACK9-NEXT: movq -128(%rsp,%rax), %r14
-; FALLBACK9-NEXT: movq -120(%rsp,%rax), %rax
-; FALLBACK9-NEXT: movq %rax, %r15
+; FALLBACK9-NEXT: movq -128(%rsp,%rdi), %r14
+; FALLBACK9-NEXT: movq -120(%rsp,%rdi), %rdi
+; FALLBACK9-NEXT: movq %rdi, %r15
; FALLBACK9-NEXT: shrdq %cl, %r10, %r15
-; FALLBACK9-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK9-NEXT: shrdq %cl, %rdi, %r14
; FALLBACK9-NEXT: # kill: def $cl killed $cl killed $ecx
; FALLBACK9-NEXT: shrq %cl, %r11
; FALLBACK9-NEXT: movq %r15, 8(%rdx)
; FALLBACK9-NEXT: movq %r9, 48(%rdx)
; FALLBACK9-NEXT: movq %r11, 56(%rdx)
-; FALLBACK9-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK9-NEXT: movq %rsi, 32(%rdx)
; FALLBACK9-NEXT: movq %rbx, 40(%rdx)
; FALLBACK9-NEXT: movq %r8, 16(%rdx)
-; FALLBACK9-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK9-NEXT: movq %rax, 24(%rdx)
; FALLBACK9-NEXT: movq %r14, (%rdx)
; FALLBACK9-NEXT: popq %rbx
; FALLBACK9-NEXT: popq %r14
@@ -12795,77 +12775,73 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK10-LABEL: lshr_64bytes:
; FALLBACK10: # %bb.0:
-; FALLBACK10-NEXT: pushq %rbp
; FALLBACK10-NEXT: pushq %r15
; FALLBACK10-NEXT: pushq %r14
; FALLBACK10-NEXT: pushq %r13
; FALLBACK10-NEXT: pushq %r12
; FALLBACK10-NEXT: pushq %rbx
-; FALLBACK10-NEXT: pushq %rax
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
; FALLBACK10-NEXT: vmovups 32(%rdi), %ymm1
-; FALLBACK10-NEXT: movl (%rsi), %eax
+; FALLBACK10-NEXT: movl (%rsi), %esi
; FALLBACK10-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: leal (,%rax,8), %esi
-; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: leal (,%rsi,8), %eax
; FALLBACK10-NEXT: andl $56, %eax
-; FALLBACK10-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK10-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK10-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK10-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK10-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK10-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK10-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK10-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK10-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK10-NEXT: movl %esi, %ebx
-; FALLBACK10-NEXT: notb %bl
-; FALLBACK10-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK10-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK10-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK10-NEXT: orq %r11, %r8
-; FALLBACK10-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK10-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK10-NEXT: orq %r12, %r11
-; FALLBACK10-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK10-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK10-NEXT: shrxq %rsi, %rbp, %rbp
-; FALLBACK10-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK10-NEXT: shrxq %rsi, %rax, %rsi
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK10-NEXT: orq %r9, %rdi
-; FALLBACK10-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK10-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK10-NEXT: orq %r14, %r9
-; FALLBACK10-NEXT: addq %r10, %r10
-; FALLBACK10-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK10-NEXT: orq %r15, %r10
-; FALLBACK10-NEXT: addq %rax, %rax
-; FALLBACK10-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK10-NEXT: orq %r13, %rax
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK10-NEXT: orq %rbp, %rcx
-; FALLBACK10-NEXT: movq %rsi, 56(%rdx)
-; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK10-NEXT: movq %rax, 48(%rdx)
-; FALLBACK10-NEXT: movq %r10, 32(%rdx)
-; FALLBACK10-NEXT: movq %r9, 40(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %r8, (%rdx)
-; FALLBACK10-NEXT: addq $8, %rsp
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: shrxq %rcx, -128(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: movq -120(%rsp,%rsi), %r10
+; FALLBACK10-NEXT: movq -112(%rsp,%rsi), %r9
+; FALLBACK10-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK10-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK10-NEXT: orq %r8, %rdi
+; FALLBACK10-NEXT: movq -104(%rsp,%rsi), %r11
+; FALLBACK10-NEXT: shrxq %rcx, %r11, %rbx
+; FALLBACK10-NEXT: movq -96(%rsp,%rsi), %r14
+; FALLBACK10-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK10-NEXT: shlxq %rax, %r8, %r8
+; FALLBACK10-NEXT: orq %rbx, %r8
+; FALLBACK10-NEXT: shrxq %rcx, %r9, %rbx
+; FALLBACK10-NEXT: addq %r11, %r11
+; FALLBACK10-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK10-NEXT: orq %rbx, %r11
+; FALLBACK10-NEXT: movq -88(%rsp,%rsi), %rbx
+; FALLBACK10-NEXT: shrxq %rcx, %rbx, %r15
+; FALLBACK10-NEXT: movq -80(%rsp,%rsi), %r12
+; FALLBACK10-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK10-NEXT: shlxq %rax, %r13, %r13
+; FALLBACK10-NEXT: orq %r15, %r13
+; FALLBACK10-NEXT: shrxq %rcx, %r14, %r14
+; FALLBACK10-NEXT: addq %rbx, %rbx
+; FALLBACK10-NEXT: shlxq %rax, %rbx, %rbx
+; FALLBACK10-NEXT: orq %r14, %rbx
+; FALLBACK10-NEXT: shrxq %rcx, %r12, %r14
+; FALLBACK10-NEXT: movq -72(%rsp,%rsi), %rsi
+; FALLBACK10-NEXT: leaq (%rsi,%rsi), %r15
+; FALLBACK10-NEXT: shlxq %rax, %r15, %r15
+; FALLBACK10-NEXT: orq %r14, %r15
+; FALLBACK10-NEXT: shrxq %rcx, %r10, %r10
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK10-NEXT: orq %r10, %rax
+; FALLBACK10-NEXT: shrxq %rcx, %rsi, %rcx
+; FALLBACK10-NEXT: movq %rcx, 56(%rdx)
+; FALLBACK10-NEXT: movq %rax, 8(%rdx)
+; FALLBACK10-NEXT: movq %r15, 48(%rdx)
+; FALLBACK10-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK10-NEXT: movq %r13, 40(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r8, 24(%rdx)
+; FALLBACK10-NEXT: movq %rdi, (%rdx)
; FALLBACK10-NEXT: popq %rbx
; FALLBACK10-NEXT: popq %r12
; FALLBACK10-NEXT: popq %r13
; FALLBACK10-NEXT: popq %r14
; FALLBACK10-NEXT: popq %r15
-; FALLBACK10-NEXT: popq %rbp
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -12930,45 +12906,45 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK12-NEXT: pushq %rbx
; FALLBACK12-NEXT: pushq %rax
; FALLBACK12-NEXT: vmovups (%rdi), %zmm0
-; FALLBACK12-NEXT: movl (%rsi), %r9d
+; FALLBACK12-NEXT: movl (%rsi), %r10d
; FALLBACK12-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK12-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; FALLBACK12-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK12-NEXT: leal (,%r9,8), %eax
+; FALLBACK12-NEXT: leal (,%r10,8), %eax
; FALLBACK12-NEXT: andl $56, %eax
-; FALLBACK12-NEXT: andl $56, %r9d
-; FALLBACK12-NEXT: movq -128(%rsp,%r9), %r10
-; FALLBACK12-NEXT: movq -120(%rsp,%r9), %r8
+; FALLBACK12-NEXT: andl $56, %r10d
+; FALLBACK12-NEXT: movq -128(%rsp,%r10), %r9
+; FALLBACK12-NEXT: movq -120(%rsp,%r10), %r8
; FALLBACK12-NEXT: movl %eax, %ecx
-; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: shrq %cl, %r9
; FALLBACK12-NEXT: movl %eax, %esi
; FALLBACK12-NEXT: notb %sil
; FALLBACK12-NEXT: leaq (%r8,%r8), %rdi
; FALLBACK12-NEXT: movl %esi, %ecx
; FALLBACK12-NEXT: shlq %cl, %rdi
-; FALLBACK12-NEXT: orq %r10, %rdi
-; FALLBACK12-NEXT: movq -104(%rsp,%r9), %r10
-; FALLBACK12-NEXT: movq %r10, %rbx
+; FALLBACK12-NEXT: orq %r9, %rdi
+; FALLBACK12-NEXT: movq -104(%rsp,%r10), %r9
+; FALLBACK12-NEXT: movq %r9, %rbx
; FALLBACK12-NEXT: movl %eax, %ecx
; FALLBACK12-NEXT: shrq %cl, %rbx
-; FALLBACK12-NEXT: movq -96(%rsp,%r9), %r12
+; FALLBACK12-NEXT: movq -96(%rsp,%r10), %r12
; FALLBACK12-NEXT: leaq (%r12,%r12), %r11
; FALLBACK12-NEXT: movl %esi, %ecx
; FALLBACK12-NEXT: shlq %cl, %r11
; FALLBACK12-NEXT: orq %rbx, %r11
-; FALLBACK12-NEXT: movq -112(%rsp,%r9), %rbx
+; FALLBACK12-NEXT: movq -112(%rsp,%r10), %rbx
; FALLBACK12-NEXT: movq %rbx, %r14
; FALLBACK12-NEXT: movl %eax, %ecx
; FALLBACK12-NEXT: shrq %cl, %r14
-; FALLBACK12-NEXT: addq %r10, %r10
+; FALLBACK12-NEXT: addq %r9, %r9
; FALLBACK12-NEXT: movl %esi, %ecx
-; FALLBACK12-NEXT: shlq %cl, %r10
-; FALLBACK12-NEXT: orq %r14, %r10
-; FALLBACK12-NEXT: movq -88(%rsp,%r9), %r14
+; FALLBACK12-NEXT: shlq %cl, %r9
+; FALLBACK12-NEXT: orq %r14, %r9
+; FALLBACK12-NEXT: movq -88(%rsp,%r10), %r14
; FALLBACK12-NEXT: movq %r14, %r13
; FALLBACK12-NEXT: movl %eax, %ecx
; FALLBACK12-NEXT: shrq %cl, %r13
-; FALLBACK12-NEXT: movq -80(%rsp,%r9), %rbp
+; FALLBACK12-NEXT: movq -80(%rsp,%r10), %rbp
; FALLBACK12-NEXT: leaq (%rbp,%rbp), %r15
; FALLBACK12-NEXT: movl %esi, %ecx
; FALLBACK12-NEXT: shlq %cl, %r15
@@ -12981,8 +12957,8 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK12-NEXT: orq %r12, %r14
; FALLBACK12-NEXT: movl %eax, %ecx
; FALLBACK12-NEXT: shrq %cl, %rbp
-; FALLBACK12-NEXT: movq -72(%rsp,%r9), %r9
-; FALLBACK12-NEXT: leaq (%r9,%r9), %r12
+; FALLBACK12-NEXT: movq -72(%rsp,%r10), %r10
+; FALLBACK12-NEXT: leaq (%r10,%r10), %r12
; FALLBACK12-NEXT: movl %esi, %ecx
; FALLBACK12-NEXT: shlq %cl, %r12
; FALLBACK12-NEXT: orq %rbp, %r12
@@ -12993,13 +12969,13 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK12-NEXT: shlq %cl, %rbx
; FALLBACK12-NEXT: orq %r8, %rbx
; FALLBACK12-NEXT: movl %eax, %ecx
-; FALLBACK12-NEXT: shrq %cl, %r9
-; FALLBACK12-NEXT: movq %r9, 56(%rdx)
+; FALLBACK12-NEXT: shrq %cl, %r10
+; FALLBACK12-NEXT: movq %r10, 56(%rdx)
; FALLBACK12-NEXT: movq %rbx, 8(%rdx)
; FALLBACK12-NEXT: movq %r12, 48(%rdx)
; FALLBACK12-NEXT: movq %r14, 32(%rdx)
; FALLBACK12-NEXT: movq %r15, 40(%rdx)
-; FALLBACK12-NEXT: movq %r10, 16(%rdx)
+; FALLBACK12-NEXT: movq %r9, 16(%rdx)
; FALLBACK12-NEXT: movq %r11, 24(%rdx)
; FALLBACK12-NEXT: movq %rdi, (%rdx)
; FALLBACK12-NEXT: addq $8, %rsp
@@ -13062,74 +13038,70 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK14-LABEL: lshr_64bytes:
; FALLBACK14: # %bb.0:
-; FALLBACK14-NEXT: pushq %rbp
; FALLBACK14-NEXT: pushq %r15
; FALLBACK14-NEXT: pushq %r14
; FALLBACK14-NEXT: pushq %r13
; FALLBACK14-NEXT: pushq %r12
; FALLBACK14-NEXT: pushq %rbx
-; FALLBACK14-NEXT: pushq %rax
; FALLBACK14-NEXT: vmovups (%rdi), %zmm0
; FALLBACK14-NEXT: movl (%rsi), %esi
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: leal (,%rsi,8), %ecx
-; FALLBACK14-NEXT: andl $56, %ecx
+; FALLBACK14-NEXT: leal (,%rsi,8), %eax
+; FALLBACK14-NEXT: andl $56, %eax
+; FALLBACK14-NEXT: movl %eax, %ecx
; FALLBACK14-NEXT: andl $56, %esi
-; FALLBACK14-NEXT: shrxq %rcx, -128(%rsp,%rsi), %r11
-; FALLBACK14-NEXT: movq -112(%rsp,%rsi), %rax
-; FALLBACK14-NEXT: movq -104(%rsp,%rsi), %rdi
-; FALLBACK14-NEXT: shrxq %rcx, %rdi, %r12
-; FALLBACK14-NEXT: movq -96(%rsp,%rsi), %r13
-; FALLBACK14-NEXT: shrxq %rcx, %rax, %r9
-; FALLBACK14-NEXT: movq -88(%rsp,%rsi), %r10
-; FALLBACK14-NEXT: shrxq %rcx, %r10, %r14
-; FALLBACK14-NEXT: shrxq %rcx, %r13, %r15
-; FALLBACK14-NEXT: movl %ecx, %ebx
-; FALLBACK14-NEXT: notb %bl
-; FALLBACK14-NEXT: movq -120(%rsp,%rsi), %rbp
-; FALLBACK14-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK14-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK14-NEXT: orq %r11, %r8
-; FALLBACK14-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK14-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK14-NEXT: orq %r12, %r11
+; FALLBACK14-NEXT: shrxq %rcx, -128(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: movq -120(%rsp,%rsi), %r10
+; FALLBACK14-NEXT: movq -112(%rsp,%rsi), %r9
+; FALLBACK14-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK14-NEXT: shlxq %rax, %rdi, %rdi
+; FALLBACK14-NEXT: orq %r8, %rdi
+; FALLBACK14-NEXT: movq -104(%rsp,%rsi), %r11
+; FALLBACK14-NEXT: shrxq %rcx, %r11, %rbx
+; FALLBACK14-NEXT: movq -96(%rsp,%rsi), %r14
+; FALLBACK14-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK14-NEXT: shlxq %rax, %r8, %r8
+; FALLBACK14-NEXT: orq %rbx, %r8
+; FALLBACK14-NEXT: shrxq %rcx, %r9, %rbx
+; FALLBACK14-NEXT: addq %r11, %r11
+; FALLBACK14-NEXT: shlxq %rax, %r11, %r11
+; FALLBACK14-NEXT: orq %rbx, %r11
+; FALLBACK14-NEXT: movq -88(%rsp,%rsi), %rbx
+; FALLBACK14-NEXT: shrxq %rcx, %rbx, %r15
; FALLBACK14-NEXT: movq -80(%rsp,%rsi), %r12
-; FALLBACK14-NEXT: shrxq %rcx, %r12, %r13
-; FALLBACK14-NEXT: shrxq %rcx, %rbp, %rbp
+; FALLBACK14-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK14-NEXT: shlxq %rax, %r13, %r13
+; FALLBACK14-NEXT: orq %r15, %r13
+; FALLBACK14-NEXT: shrxq %rcx, %r14, %r14
+; FALLBACK14-NEXT: addq %rbx, %rbx
+; FALLBACK14-NEXT: shlxq %rax, %rbx, %rbx
+; FALLBACK14-NEXT: orq %r14, %rbx
+; FALLBACK14-NEXT: shrxq %rcx, %r12, %r14
; FALLBACK14-NEXT: movq -72(%rsp,%rsi), %rsi
+; FALLBACK14-NEXT: leaq (%rsi,%rsi), %r15
+; FALLBACK14-NEXT: shlxq %rax, %r15, %r15
+; FALLBACK14-NEXT: orq %r14, %r15
+; FALLBACK14-NEXT: shrxq %rcx, %r10, %r10
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rax, %r9, %rax
+; FALLBACK14-NEXT: orq %r10, %rax
; FALLBACK14-NEXT: shrxq %rcx, %rsi, %rcx
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK14-NEXT: orq %r9, %rdi
-; FALLBACK14-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK14-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK14-NEXT: orq %r14, %r9
-; FALLBACK14-NEXT: addq %r10, %r10
-; FALLBACK14-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK14-NEXT: orq %r15, %r10
-; FALLBACK14-NEXT: addq %rsi, %rsi
-; FALLBACK14-NEXT: shlxq %rbx, %rsi, %rsi
-; FALLBACK14-NEXT: orq %r13, %rsi
-; FALLBACK14-NEXT: addq %rax, %rax
-; FALLBACK14-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK14-NEXT: orq %rbp, %rax
; FALLBACK14-NEXT: movq %rcx, 56(%rdx)
; FALLBACK14-NEXT: movq %rax, 8(%rdx)
-; FALLBACK14-NEXT: movq %rsi, 48(%rdx)
-; FALLBACK14-NEXT: movq %r10, 32(%rdx)
-; FALLBACK14-NEXT: movq %r9, 40(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %r8, (%rdx)
-; FALLBACK14-NEXT: addq $8, %rsp
+; FALLBACK14-NEXT: movq %r15, 48(%rdx)
+; FALLBACK14-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK14-NEXT: movq %r13, 40(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r8, 24(%rdx)
+; FALLBACK14-NEXT: movq %rdi, (%rdx)
; FALLBACK14-NEXT: popq %rbx
; FALLBACK14-NEXT: popq %r12
; FALLBACK14-NEXT: popq %r13
; FALLBACK14-NEXT: popq %r14
; FALLBACK14-NEXT: popq %r15
-; FALLBACK14-NEXT: popq %rbp
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -13139,40 +13111,40 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK15-NEXT: pushq %r14
; FALLBACK15-NEXT: pushq %rbx
; FALLBACK15-NEXT: vmovups (%rdi), %zmm0
-; FALLBACK15-NEXT: movl (%rsi), %eax
+; FALLBACK15-NEXT: movl (%rsi), %edi
; FALLBACK15-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK15-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; FALLBACK15-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK15-NEXT: leal (,%rax,8), %ecx
+; FALLBACK15-NEXT: leal (,%rdi,8), %ecx
; FALLBACK15-NEXT: andl $56, %ecx
-; FALLBACK15-NEXT: andl $56, %eax
-; FALLBACK15-NEXT: movq -96(%rsp,%rax), %rdi
-; FALLBACK15-NEXT: movq -104(%rsp,%rax), %r9
-; FALLBACK15-NEXT: movq %r9, %rsi
-; FALLBACK15-NEXT: shrdq %cl, %rdi, %rsi
-; FALLBACK15-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK15-NEXT: andl $56, %edi
+; FALLBACK15-NEXT: movq -96(%rsp,%rdi), %rsi
+; FALLBACK15-NEXT: movq -104(%rsp,%rdi), %r9
+; FALLBACK15-NEXT: movq %r9, %rax
+; FALLBACK15-NEXT: shrdq %cl, %rsi, %rax
+; FALLBACK15-NEXT: movq -112(%rsp,%rdi), %r10
; FALLBACK15-NEXT: movq %r10, %r8
; FALLBACK15-NEXT: shrdq %cl, %r9, %r8
-; FALLBACK15-NEXT: movq -80(%rsp,%rax), %r9
-; FALLBACK15-NEXT: movq -88(%rsp,%rax), %r11
+; FALLBACK15-NEXT: movq -80(%rsp,%rdi), %r9
+; FALLBACK15-NEXT: movq -88(%rsp,%rdi), %r11
; FALLBACK15-NEXT: movq %r11, %rbx
; FALLBACK15-NEXT: shrdq %cl, %r9, %rbx
-; FALLBACK15-NEXT: shrdq %cl, %r11, %rdi
-; FALLBACK15-NEXT: movq -72(%rsp,%rax), %r11
+; FALLBACK15-NEXT: shrdq %cl, %r11, %rsi
+; FALLBACK15-NEXT: movq -72(%rsp,%rdi), %r11
; FALLBACK15-NEXT: shrdq %cl, %r11, %r9
-; FALLBACK15-NEXT: movq -128(%rsp,%rax), %r14
-; FALLBACK15-NEXT: movq -120(%rsp,%rax), %rax
-; FALLBACK15-NEXT: movq %rax, %r15
+; FALLBACK15-NEXT: movq -128(%rsp,%rdi), %r14
+; FALLBACK15-NEXT: movq -120(%rsp,%rdi), %rdi
+; FALLBACK15-NEXT: movq %rdi, %r15
; FALLBACK15-NEXT: shrdq %cl, %r10, %r15
; FALLBACK15-NEXT: shrxq %rcx, %r11, %r10
; FALLBACK15-NEXT: # kill: def $cl killed $cl killed $rcx
-; FALLBACK15-NEXT: shrdq %cl, %rax, %r14
+; FALLBACK15-NEXT: shrdq %cl, %rdi, %r14
; FALLBACK15-NEXT: movq %r15, 8(%rdx)
; FALLBACK15-NEXT: movq %r9, 48(%rdx)
-; FALLBACK15-NEXT: movq %rdi, 32(%rdx)
+; FALLBACK15-NEXT: movq %rsi, 32(%rdx)
; FALLBACK15-NEXT: movq %rbx, 40(%rdx)
; FALLBACK15-NEXT: movq %r8, 16(%rdx)
-; FALLBACK15-NEXT: movq %rsi, 24(%rdx)
+; FALLBACK15-NEXT: movq %rax, 24(%rdx)
; FALLBACK15-NEXT: movq %r14, (%rdx)
; FALLBACK15-NEXT: movq %r10, 56(%rdx)
; FALLBACK15-NEXT: popq %rbx
@@ -13618,14 +13590,15 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 36(%eax), %ecx
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 40(%eax), %ebp
-; FALLBACK18-NEXT: movl 44(%eax), %ebx
+; FALLBACK18-NEXT: movl 40(%eax), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 44(%eax), %ebp
; FALLBACK18-NEXT: movl 48(%eax), %edi
; FALLBACK18-NEXT: movl 52(%eax), %esi
; FALLBACK18-NEXT: movl 56(%eax), %edx
; FALLBACK18-NEXT: movl 60(%eax), %ecx
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl (%eax), %eax
+; FALLBACK18-NEXT: movl (%eax), %ebx
; FALLBACK18-NEXT: xorps %xmm0, %xmm0
; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
@@ -13634,136 +13607,138 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %edx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %esi, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl %ebp, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: movl %eax, %ecx
-; FALLBACK18-NEXT: leal (,%eax,8), %edx
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; FALLBACK18-NEXT: leal (,%ebx,8), %edx
; FALLBACK18-NEXT: andl $24, %edx
-; FALLBACK18-NEXT: andl $60, %ecx
-; FALLBACK18-NEXT: movl 68(%esp,%ecx), %esi
-; FALLBACK18-NEXT: movl 72(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl %edx, %ecx
+; FALLBACK18-NEXT: andl $60, %ebx
+; FALLBACK18-NEXT: movl 68(%esp,%ebx), %esi
+; FALLBACK18-NEXT: movl 72(%esp,%ebx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %edi
-; FALLBACK18-NEXT: movl %edx, %ebx
-; FALLBACK18-NEXT: notb %bl
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK18-NEXT: notb %dl
; FALLBACK18-NEXT: leal (%eax,%eax), %ebp
-; FALLBACK18-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, 64(%esp,%ebx), %edi
; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK18-NEXT: movl 80(%esp,%ebx), %esi
; FALLBACK18-NEXT: leal (%esi,%esi), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK18-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK18-NEXT: movl 76(%esp,%ebx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
; FALLBACK18-NEXT: orl %eax, %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl 88(%esp,%ebx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK18-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK18-NEXT: movl 84(%esp,%ebx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: orl %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK18-NEXT: movl 96(%esp,%ebx), %esi
; FALLBACK18-NEXT: leal (%esi,%esi), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK18-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK18-NEXT: movl 92(%esp,%ebx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
; FALLBACK18-NEXT: orl %eax, %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl 104(%esp,%ebx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK18-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK18-NEXT: movl 100(%esp,%ebx), %edi
+; FALLBACK18-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %esi
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: orl %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 112(%esp,%ecx), %eax
+; FALLBACK18-NEXT: movl 112(%esp,%ebx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
-; FALLBACK18-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK18-NEXT: movl %ecx, %edi
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK18-NEXT: movl 108(%esp,%ebx), %esi
+; FALLBACK18-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %ecx, %ebp
; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %ecx, %esi
-; FALLBACK18-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK18-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK18-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK18-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK18-NEXT: orl %edi, %ecx
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, %esi, %ecx
+; FALLBACK18-NEXT: orl %eax, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 120(%esp,%ebx), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK18-NEXT: movl 116(%esp,%ebx), %eax
+; FALLBACK18-NEXT: movl %ebp, %ecx
+; FALLBACK18-NEXT: shrxl %ebp, %eax, %ebp
+; FALLBACK18-NEXT: orl %ebp, %esi
+; FALLBACK18-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl %ecx, %ebp
; FALLBACK18-NEXT: addl %eax, %eax
-; FALLBACK18-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK18-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK18-NEXT: shrxl %edx, %ebp, %edx
-; FALLBACK18-NEXT: addl %ebp, %ebp
-; FALLBACK18-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK18-NEXT: orl %eax, %ebx
+; FALLBACK18-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl 124(%esp,%ebx), %eax
+; FALLBACK18-NEXT: leal (%eax,%eax), %ebx
+; FALLBACK18-NEXT: shlxl %edx, %ebx, %edx
+; FALLBACK18-NEXT: shrxl %ebp, %edi, %edi
+; FALLBACK18-NEXT: orl %edi, %edx
+; FALLBACK18-NEXT: shrxl %ebp, %eax, %edi
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl %edx, 60(%eax)
-; FALLBACK18-NEXT: movl %ebx, 56(%eax)
-; FALLBACK18-NEXT: movl %edi, 48(%eax)
-; FALLBACK18-NEXT: movl %ecx, 52(%eax)
-; FALLBACK18-NEXT: movl %esi, 40(%eax)
+; FALLBACK18-NEXT: movl %edi, 60(%eax)
+; FALLBACK18-NEXT: movl %edx, 56(%eax)
+; FALLBACK18-NEXT: movl %ecx, 48(%eax)
+; FALLBACK18-NEXT: movl %esi, 52(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 40(%eax)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK18-NEXT: movl %ecx, 44(%eax)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -14284,7 +14259,7 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movups 16(%ecx), %xmm1
; FALLBACK22-NEXT: movups 32(%ecx), %xmm2
; FALLBACK22-NEXT: movups 48(%ecx), %xmm3
-; FALLBACK22-NEXT: movl (%eax), %ecx
+; FALLBACK22-NEXT: movl (%eax), %ebx
; FALLBACK22-NEXT: xorps %xmm4, %xmm4
; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm4, {{[0-9]+}}(%esp)
@@ -14294,112 +14269,114 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: leal (,%ecx,8), %edx
+; FALLBACK22-NEXT: leal (,%ebx,8), %edx
; FALLBACK22-NEXT: andl $24, %edx
-; FALLBACK22-NEXT: andl $60, %ecx
-; FALLBACK22-NEXT: movl 68(%esp,%ecx), %esi
-; FALLBACK22-NEXT: movl 72(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl %edx, %ecx
+; FALLBACK22-NEXT: andl $60, %ebx
+; FALLBACK22-NEXT: movl 68(%esp,%ebx), %esi
+; FALLBACK22-NEXT: movl 72(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %edi
-; FALLBACK22-NEXT: movl %edx, %ebx
-; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK22-NEXT: notb %dl
; FALLBACK22-NEXT: leal (%eax,%eax), %ebp
-; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebp
-; FALLBACK22-NEXT: orl %edi, %ebp
-; FALLBACK22-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shlxl %edx, %ebp, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %ecx, 64(%esp,%ebx), %edi
; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %edi, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 80(%esp,%ecx), %esi
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 80(%esp,%ebx), %esi
; FALLBACK22-NEXT: leal (%esi,%esi), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK22-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK22-NEXT: movl 76(%esp,%ebx), %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
; FALLBACK22-NEXT: orl %eax, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 88(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl 88(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK22-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK22-NEXT: movl 84(%esp,%ebx), %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %esi
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: orl %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 96(%esp,%ecx), %esi
+; FALLBACK22-NEXT: movl 96(%esp,%ebx), %esi
; FALLBACK22-NEXT: leal (%esi,%esi), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK22-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK22-NEXT: movl 92(%esp,%ebx), %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
; FALLBACK22-NEXT: orl %eax, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 104(%esp,%ecx), %eax
+; FALLBACK22-NEXT: movl 104(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK22-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK22-NEXT: movl 100(%esp,%ebx), %edi
+; FALLBACK22-NEXT: shrxl %ecx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %esi
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: orl %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl %ecx, %eax
-; FALLBACK22-NEXT: movl 112(%esp,%ecx), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: leal (%ecx,%ecx), %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %ecx
-; FALLBACK22-NEXT: movl 108(%esp,%eax), %esi
+; FALLBACK22-NEXT: movl 112(%esp,%ebx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK22-NEXT: orl %ebp, %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: leal (%eax,%eax), %esi
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK22-NEXT: movl 108(%esp,%ebx), %esi
+; FALLBACK22-NEXT: shrxl %ecx, %esi, %edi
+; FALLBACK22-NEXT: orl %edi, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %ecx, %ebp
; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl 120(%esp,%eax), %ebp
-; FALLBACK22-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK22-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: movl 116(%esp,%eax), %eax
-; FALLBACK22-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK22-NEXT: orl %edi, %ecx
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, %esi, %ecx
+; FALLBACK22-NEXT: orl %eax, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 120(%esp,%ebx), %edi
+; FALLBACK22-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK22-NEXT: movl 116(%esp,%ebx), %eax
+; FALLBACK22-NEXT: movl %ebp, %ecx
+; FALLBACK22-NEXT: shrxl %ebp, %eax, %ebp
+; FALLBACK22-NEXT: orl %ebp, %esi
+; FALLBACK22-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl %ecx, %ebp
; FALLBACK22-NEXT: addl %eax, %eax
-; FALLBACK22-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK22-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK22-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK22-NEXT: shrxl %edx, %ebp, %edx
-; FALLBACK22-NEXT: addl %ebp, %ebp
-; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK22-NEXT: orl %eax, %ebx
+; FALLBACK22-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl 124(%esp,%ebx), %eax
+; FALLBACK22-NEXT: leal (%eax,%eax), %ebx
+; FALLBACK22-NEXT: shlxl %edx, %ebx, %edx
+; FALLBACK22-NEXT: shrxl %ebp, %edi, %edi
+; FALLBACK22-NEXT: orl %edi, %edx
+; FALLBACK22-NEXT: shrxl %ebp, %eax, %edi
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK22-NEXT: movl %edx, 60(%eax)
-; FALLBACK22-NEXT: movl %ebx, 56(%eax)
-; FALLBACK22-NEXT: movl %edi, 48(%eax)
-; FALLBACK22-NEXT: movl %ecx, 52(%eax)
-; FALLBACK22-NEXT: movl %esi, 40(%eax)
+; FALLBACK22-NEXT: movl %edi, 60(%eax)
+; FALLBACK22-NEXT: movl %edx, 56(%eax)
+; FALLBACK22-NEXT: movl %ecx, 48(%eax)
+; FALLBACK22-NEXT: movl %esi, 52(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 40(%eax)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK22-NEXT: movl %ecx, 44(%eax)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -14873,109 +14850,107 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: leal (,%ecx,8), %edx
; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: movl %edx, %ebx
; FALLBACK26-NEXT: andl $60, %ecx
; FALLBACK26-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK26-NEXT: movl 72(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %edi
-; FALLBACK26-NEXT: movl %edx, %ebx
-; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK26-NEXT: notb %dl
; FALLBACK26-NEXT: leal (%eax,%eax), %ebp
-; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK26-NEXT: shlxl %edx, %ebp, %ebp
; FALLBACK26-NEXT: orl %edi, %ebp
; FALLBACK26-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK26-NEXT: shlxl %edx, %esi, %esi
; FALLBACK26-NEXT: orl %edi, %esi
; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK26-NEXT: leal (%esi,%esi), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
; FALLBACK26-NEXT: orl %eax, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: orl %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK26-NEXT: leal (%esi,%esi), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
; FALLBACK26-NEXT: orl %eax, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: orl %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %esi, %eax
; FALLBACK26-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %eax, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 120(%esp,%ecx), %ebp
-; FALLBACK26-NEXT: leal (%ebp,%ebp), %eax
-; FALLBACK26-NEXT: shlxl %ebx, %eax, %esi
+; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: orl %eax, %ebp
+; FALLBACK26-NEXT: movl 120(%esp,%ecx), %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: addl %eax, %eax
+; FALLBACK26-NEXT: shlxl %edx, %eax, %esi
; FALLBACK26-NEXT: movl 116(%esp,%ecx), %eax
-; FALLBACK26-NEXT: shrxl %edx, %eax, %edi
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %edi
; FALLBACK26-NEXT: orl %edi, %esi
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %eax, %eax
-; FALLBACK26-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK26-NEXT: shrxl %edx, %ebp, %eax
+; FALLBACK26-NEXT: shlxl %edx, %eax, %eax
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl 124(%esp,%ecx), %ecx
-; FALLBACK26-NEXT: shrxl %edx, %ecx, %edx
-; FALLBACK26-NEXT: addl %ecx, %ecx
-; FALLBACK26-NEXT: shlxl %ebx, %ecx, %ebx
-; FALLBACK26-NEXT: orl %eax, %ebx
+; FALLBACK26-NEXT: leal (%ecx,%ecx), %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edx
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: orl %edi, %edx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %edi
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; FALLBACK26-NEXT: movl %edx, 60(%ecx)
-; FALLBACK26-NEXT: movl %ebx, 56(%ecx)
-; FALLBACK26-NEXT: movl %edi, 48(%ecx)
+; FALLBACK26-NEXT: movl %edi, 60(%ecx)
+; FALLBACK26-NEXT: movl %edx, 56(%ecx)
+; FALLBACK26-NEXT: movl %eax, 48(%ecx)
; FALLBACK26-NEXT: movl %esi, 52(%ecx)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: movl %eax, 40(%ecx)
+; FALLBACK26-NEXT: movl %ebp, 40(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK26-NEXT: movl %eax, 44(%ecx)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -15430,115 +15405,113 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
; FALLBACK30-NEXT: vmovups (%ecx), %zmm0
-; FALLBACK30-NEXT: movl (%eax), %edx
+; FALLBACK30-NEXT: movl (%eax), %ecx
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: leal (,%edx,8), %ecx
-; FALLBACK30-NEXT: andl $24, %ecx
-; FALLBACK30-NEXT: andl $60, %edx
-; FALLBACK30-NEXT: movl 68(%esp,%edx), %esi
-; FALLBACK30-NEXT: movl 72(%esp,%edx), %eax
+; FALLBACK30-NEXT: leal (,%ecx,8), %edx
+; FALLBACK30-NEXT: andl $24, %edx
+; FALLBACK30-NEXT: movl %edx, %ebx
+; FALLBACK30-NEXT: andl $60, %ecx
+; FALLBACK30-NEXT: movl 68(%esp,%ecx), %esi
+; FALLBACK30-NEXT: movl 72(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %edi
-; FALLBACK30-NEXT: movl %ecx, %ebx
-; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK30-NEXT: notb %dl
; FALLBACK30-NEXT: leal (%eax,%eax), %ebp
-; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %ebp, %ebp
; FALLBACK30-NEXT: orl %edi, %ebp
; FALLBACK30-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, 64(%esp,%edx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
+; FALLBACK30-NEXT: shlxl %edx, %esi, %esi
; FALLBACK30-NEXT: orl %edi, %esi
; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 80(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK30-NEXT: leal (%esi,%esi), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK30-NEXT: movl 76(%esp,%edx), %edi
-; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK30-NEXT: movl 76(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
; FALLBACK30-NEXT: orl %eax, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 88(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK30-NEXT: movl 84(%esp,%edx), %edi
-; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK30-NEXT: movl 84(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: orl %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 96(%esp,%edx), %esi
+; FALLBACK30-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK30-NEXT: leal (%esi,%esi), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK30-NEXT: movl 92(%esp,%edx), %edi
-; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK30-NEXT: movl 92(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
; FALLBACK30-NEXT: orl %eax, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 104(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
-; FALLBACK30-NEXT: movl 100(%esp,%edx), %edi
-; FALLBACK30-NEXT: shrxl %ecx, %edi, %ebp
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
+; FALLBACK30-NEXT: movl 100(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: orl %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 112(%esp,%edx), %eax
+; FALLBACK30-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
-; FALLBACK30-NEXT: movl 108(%esp,%edx), %esi
-; FALLBACK30-NEXT: shrxl %ecx, %esi, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK30-NEXT: movl 108(%esp,%ecx), %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %eax, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 120(%esp,%edx), %ebp
-; FALLBACK30-NEXT: leal (%ebp,%ebp), %eax
-; FALLBACK30-NEXT: shlxl %ebx, %eax, %esi
-; FALLBACK30-NEXT: movl 116(%esp,%edx), %eax
-; FALLBACK30-NEXT: shrxl %ecx, %eax, %edi
+; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: orl %eax, %ebp
+; FALLBACK30-NEXT: movl 120(%esp,%ecx), %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: addl %eax, %eax
+; FALLBACK30-NEXT: shlxl %edx, %eax, %esi
+; FALLBACK30-NEXT: movl 116(%esp,%ecx), %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %edi
; FALLBACK30-NEXT: orl %edi, %esi
-; FALLBACK30-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %eax, %eax
-; FALLBACK30-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK30-NEXT: shrxl %ecx, %ebp, %eax
-; FALLBACK30-NEXT: movl 124(%esp,%edx), %edx
-; FALLBACK30-NEXT: shrxl %ecx, %edx, %ebp
-; FALLBACK30-NEXT: leal (%edx,%edx), %ecx
-; FALLBACK30-NEXT: shlxl %ebx, %ecx, %edx
-; FALLBACK30-NEXT: orl %eax, %edx
+; FALLBACK30-NEXT: shlxl %edx, %eax, %eax
+; FALLBACK30-NEXT: orl %edi, %eax
+; FALLBACK30-NEXT: movl 124(%esp,%ecx), %ecx
+; FALLBACK30-NEXT: leal (%ecx,%ecx), %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edx
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: orl %edi, %edx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %edi
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; FALLBACK30-NEXT: movl %ebp, 60(%ecx)
+; FALLBACK30-NEXT: movl %edi, 60(%ecx)
; FALLBACK30-NEXT: movl %edx, 56(%ecx)
-; FALLBACK30-NEXT: movl %edi, 48(%ecx)
+; FALLBACK30-NEXT: movl %eax, 48(%ecx)
; FALLBACK30-NEXT: movl %esi, 52(%ecx)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: movl %eax, 40(%ecx)
+; FALLBACK30-NEXT: movl %ebp, 40(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK30-NEXT: movl %eax, 44(%ecx)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -16196,10 +16169,8 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK2-LABEL: shl_64bytes:
; FALLBACK2: # %bb.0:
-; FALLBACK2-NEXT: pushq %rbp
; FALLBACK2-NEXT: pushq %r15
; FALLBACK2-NEXT: pushq %r14
-; FALLBACK2-NEXT: pushq %r13
; FALLBACK2-NEXT: pushq %r12
; FALLBACK2-NEXT: pushq %rbx
; FALLBACK2-NEXT: pushq %rax
@@ -16227,62 +16198,60 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: leal (,%rsi,8), %eax
; FALLBACK2-NEXT: andl $56, %eax
+; FALLBACK2-NEXT: movl %eax, %ecx
; FALLBACK2-NEXT: andl $56, %esi
; FALLBACK2-NEXT: negl %esi
; FALLBACK2-NEXT: movslq %esi, %rsi
-; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %r10
-; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %rcx
-; FALLBACK2-NEXT: shlxq %rax, %rcx, %r9
-; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %rdi
-; FALLBACK2-NEXT: shlxq %rax, %rdi, %r11
-; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %r14
-; FALLBACK2-NEXT: shlxq %rax, %r14, %rbx
-; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %r8
-; FALLBACK2-NEXT: shlxq %rax, %r8, %r15
-; FALLBACK2-NEXT: shlxq %rax, %r10, %r12
-; FALLBACK2-NEXT: movl %eax, %r13d
-; FALLBACK2-NEXT: notb %r13b
-; FALLBACK2-NEXT: shrq %r10
-; FALLBACK2-NEXT: shrxq %r13, %r10, %r10
-; FALLBACK2-NEXT: orq %r9, %r10
-; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %r9
-; FALLBACK2-NEXT: shlxq %rax, %r9, %rbp
-; FALLBACK2-NEXT: shrq %r14
-; FALLBACK2-NEXT: shrxq %r13, %r14, %r14
-; FALLBACK2-NEXT: orq %r11, %r14
-; FALLBACK2-NEXT: shlxq %rax, -8(%rsp,%rsi), %r11
-; FALLBACK2-NEXT: movq -16(%rsp,%rsi), %rsi
-; FALLBACK2-NEXT: shlxq %rax, %rsi, %rax
-; FALLBACK2-NEXT: shrq %rcx
-; FALLBACK2-NEXT: shrxq %r13, %rcx, %rcx
-; FALLBACK2-NEXT: orq %rbx, %rcx
+; FALLBACK2-NEXT: movq -64(%rsp,%rsi), %r9
+; FALLBACK2-NEXT: movq -56(%rsp,%rsi), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %r8
+; FALLBACK2-NEXT: notb %al
+; FALLBACK2-NEXT: shlxq %rcx, %r9, %r10
; FALLBACK2-NEXT: shrq %r9
-; FALLBACK2-NEXT: shrxq %r13, %r9, %r9
-; FALLBACK2-NEXT: orq %r15, %r9
+; FALLBACK2-NEXT: shrxq %rax, %r9, %r9
+; FALLBACK2-NEXT: orq %r8, %r9
+; FALLBACK2-NEXT: movq -40(%rsp,%rsi), %r11
+; FALLBACK2-NEXT: shlxq %rcx, %r11, %rbx
+; FALLBACK2-NEXT: movq -48(%rsp,%rsi), %r8
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r14
+; FALLBACK2-NEXT: shrq %r8
+; FALLBACK2-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK2-NEXT: orq %rbx, %r8
; FALLBACK2-NEXT: shrq %rdi
-; FALLBACK2-NEXT: shrxq %r13, %rdi, %rdi
-; FALLBACK2-NEXT: orq %rbp, %rdi
+; FALLBACK2-NEXT: shrxq %rax, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r14, %rdi
+; FALLBACK2-NEXT: movq -24(%rsp,%rsi), %rbx
+; FALLBACK2-NEXT: shlxq %rcx, %rbx, %r14
+; FALLBACK2-NEXT: movq -32(%rsp,%rsi), %r15
+; FALLBACK2-NEXT: shlxq %rcx, %r15, %r12
+; FALLBACK2-NEXT: shrq %r15
+; FALLBACK2-NEXT: shrxq %rax, %r15, %r15
+; FALLBACK2-NEXT: orq %r14, %r15
+; FALLBACK2-NEXT: shrq %r11
+; FALLBACK2-NEXT: shrxq %rax, %r11, %r11
+; FALLBACK2-NEXT: orq %r12, %r11
+; FALLBACK2-NEXT: shlxq %rcx, -8(%rsp,%rsi), %r14
+; FALLBACK2-NEXT: movq -16(%rsp,%rsi), %rsi
+; FALLBACK2-NEXT: shlxq %rcx, %rsi, %rcx
; FALLBACK2-NEXT: shrq %rsi
-; FALLBACK2-NEXT: shrxq %r13, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r11, %rsi
-; FALLBACK2-NEXT: shrq %r8
-; FALLBACK2-NEXT: shrxq %r13, %r8, %r8
-; FALLBACK2-NEXT: orq %rax, %r8
-; FALLBACK2-NEXT: movq %r12, (%rdx)
-; FALLBACK2-NEXT: movq %r8, 48(%rdx)
+; FALLBACK2-NEXT: shrxq %rax, %rsi, %rsi
+; FALLBACK2-NEXT: orq %r14, %rsi
+; FALLBACK2-NEXT: shrq %rbx
+; FALLBACK2-NEXT: shrxq %rax, %rbx, %rax
+; FALLBACK2-NEXT: orq %rcx, %rax
+; FALLBACK2-NEXT: movq %r10, (%rdx)
+; FALLBACK2-NEXT: movq %rax, 48(%rdx)
; FALLBACK2-NEXT: movq %rsi, 56(%rdx)
-; FALLBACK2-NEXT: movq %rdi, 32(%rdx)
-; FALLBACK2-NEXT: movq %r9, 40(%rdx)
-; FALLBACK2-NEXT: movq %rcx, 16(%rdx)
-; FALLBACK2-NEXT: movq %r14, 24(%rdx)
-; FALLBACK2-NEXT: movq %r10, 8(%rdx)
+; FALLBACK2-NEXT: movq %r11, 32(%rdx)
+; FALLBACK2-NEXT: movq %r15, 40(%rdx)
+; FALLBACK2-NEXT: movq %rdi, 16(%rdx)
+; FALLBACK2-NEXT: movq %r8, 24(%rdx)
+; FALLBACK2-NEXT: movq %r9, 8(%rdx)
; FALLBACK2-NEXT: addq $8, %rsp
; FALLBACK2-NEXT: popq %rbx
; FALLBACK2-NEXT: popq %r12
-; FALLBACK2-NEXT: popq %r13
; FALLBACK2-NEXT: popq %r14
; FALLBACK2-NEXT: popq %r15
-; FALLBACK2-NEXT: popq %rbp
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: shl_64bytes:
@@ -16509,86 +16478,81 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK6-LABEL: shl_64bytes:
; FALLBACK6: # %bb.0:
-; FALLBACK6-NEXT: pushq %rbp
; FALLBACK6-NEXT: pushq %r15
; FALLBACK6-NEXT: pushq %r14
-; FALLBACK6-NEXT: pushq %r13
; FALLBACK6-NEXT: pushq %r12
; FALLBACK6-NEXT: pushq %rbx
-; FALLBACK6-NEXT: subq $24, %rsp
+; FALLBACK6-NEXT: pushq %rax
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
; FALLBACK6-NEXT: movups 48(%rdi), %xmm3
-; FALLBACK6-NEXT: movl (%rsi), %eax
+; FALLBACK6-NEXT: movl (%rsi), %esi
; FALLBACK6-NEXT: xorps %xmm4, %xmm4
; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm4, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: movaps %xmm3, (%rsp)
+; FALLBACK6-NEXT: movaps %xmm3, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm2, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: leal (,%rax,8), %ecx
-; FALLBACK6-NEXT: andl $56, %ecx
+; FALLBACK6-NEXT: leal (,%rsi,8), %eax
; FALLBACK6-NEXT: andl $56, %eax
-; FALLBACK6-NEXT: negl %eax
-; FALLBACK6-NEXT: movslq %eax, %rsi
-; FALLBACK6-NEXT: movq -8(%rsp,%rsi), %rax
-; FALLBACK6-NEXT: shlxq %rcx, %rax, %r12
-; FALLBACK6-NEXT: movq -16(%rsp,%rsi), %rdi
-; FALLBACK6-NEXT: shlxq %rcx, %rdi, %r15
-; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %r13
-; FALLBACK6-NEXT: shlxq %rcx, %r13, %r8
-; FALLBACK6-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %r11
-; FALLBACK6-NEXT: shlxq %rcx, %r11, %r10
-; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %r14
-; FALLBACK6-NEXT: shlxq %rcx, %r14, %rbx
-; FALLBACK6-NEXT: movl %ecx, %r9d
-; FALLBACK6-NEXT: notb %r9b
+; FALLBACK6-NEXT: movl %eax, %ecx
+; FALLBACK6-NEXT: andl $56, %esi
+; FALLBACK6-NEXT: negl %esi
+; FALLBACK6-NEXT: movslq %esi, %rsi
+; FALLBACK6-NEXT: movq -24(%rsp,%rsi), %rdi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %r9
+; FALLBACK6-NEXT: notb %al
+; FALLBACK6-NEXT: movq -32(%rsp,%rsi), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK6-NEXT: shrq %r8
+; FALLBACK6-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK6-NEXT: orq %r9, %r8
+; FALLBACK6-NEXT: movq -40(%rsp,%rsi), %r9
+; FALLBACK6-NEXT: shlxq %rcx, %r9, %r11
+; FALLBACK6-NEXT: shrq %r9
+; FALLBACK6-NEXT: shrxq %rax, %r9, %r9
+; FALLBACK6-NEXT: orq %r10, %r9
+; FALLBACK6-NEXT: movq -48(%rsp,%rsi), %r10
+; FALLBACK6-NEXT: shlxq %rcx, %r10, %r14
+; FALLBACK6-NEXT: shrq %r10
+; FALLBACK6-NEXT: shrxq %rax, %r10, %r10
+; FALLBACK6-NEXT: orq %r11, %r10
+; FALLBACK6-NEXT: movq -64(%rsp,%rsi), %rbx
+; FALLBACK6-NEXT: movq -56(%rsp,%rsi), %r11
+; FALLBACK6-NEXT: shlxq %rcx, %r11, %r15
+; FALLBACK6-NEXT: shrq %r11
+; FALLBACK6-NEXT: shrxq %rax, %r11, %r11
+; FALLBACK6-NEXT: orq %r14, %r11
+; FALLBACK6-NEXT: shlxq %rcx, %rbx, %r14
+; FALLBACK6-NEXT: shrq %rbx
+; FALLBACK6-NEXT: shrxq %rax, %rbx, %rbx
+; FALLBACK6-NEXT: orq %r15, %rbx
+; FALLBACK6-NEXT: movq -16(%rsp,%rsi), %r15
+; FALLBACK6-NEXT: shlxq %rcx, %r15, %r12
; FALLBACK6-NEXT: shrq %rdi
-; FALLBACK6-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK6-NEXT: shrxq %rax, %rdi, %rdi
; FALLBACK6-NEXT: orq %r12, %rdi
-; FALLBACK6-NEXT: movq (%rsp,%rsi), %rbp
-; FALLBACK6-NEXT: shlxq %rcx, %rbp, %r8
-; FALLBACK6-NEXT: shrq %r13
-; FALLBACK6-NEXT: shrxq %r9, %r13, %r12
-; FALLBACK6-NEXT: orq %r15, %r12
-; FALLBACK6-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
-; FALLBACK6-NEXT: movq -48(%rsp,%rsi), %rsi
-; FALLBACK6-NEXT: shlxq %rcx, %rsi, %rcx
-; FALLBACK6-NEXT: shrq %r11
-; FALLBACK6-NEXT: shrxq %r9, %r11, %r11
-; FALLBACK6-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; FALLBACK6-NEXT: shrq %r14
-; FALLBACK6-NEXT: shrxq %r9, %r14, %r14
-; FALLBACK6-NEXT: orq %r10, %r14
-; FALLBACK6-NEXT: shrq %rsi
-; FALLBACK6-NEXT: shrxq %r9, %rsi, %rsi
-; FALLBACK6-NEXT: orq %rbx, %rsi
-; FALLBACK6-NEXT: shrq %rax
-; FALLBACK6-NEXT: shrxq %r9, %rax, %rax
-; FALLBACK6-NEXT: orq %r8, %rax
-; FALLBACK6-NEXT: shrq %rbp
-; FALLBACK6-NEXT: shrxq %r9, %rbp, %r8
-; FALLBACK6-NEXT: orq %r15, %r8
-; FALLBACK6-NEXT: movq %rcx, (%rdx)
-; FALLBACK6-NEXT: movq %r8, 56(%rdx)
-; FALLBACK6-NEXT: movq %rax, 48(%rdx)
-; FALLBACK6-NEXT: movq %rsi, 8(%rdx)
-; FALLBACK6-NEXT: movq %r14, 16(%rdx)
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
-; FALLBACK6-NEXT: movq %r12, 32(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 40(%rdx)
-; FALLBACK6-NEXT: addq $24, %rsp
+; FALLBACK6-NEXT: shlxq %rcx, -8(%rsp,%rsi), %rcx
+; FALLBACK6-NEXT: shrq %r15
+; FALLBACK6-NEXT: shrxq %rax, %r15, %rax
+; FALLBACK6-NEXT: orq %rcx, %rax
+; FALLBACK6-NEXT: movq %r14, (%rdx)
+; FALLBACK6-NEXT: movq %rax, 56(%rdx)
+; FALLBACK6-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK6-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r10, 24(%rdx)
+; FALLBACK6-NEXT: movq %r9, 32(%rdx)
+; FALLBACK6-NEXT: movq %r8, 40(%rdx)
+; FALLBACK6-NEXT: addq $8, %rsp
; FALLBACK6-NEXT: popq %rbx
; FALLBACK6-NEXT: popq %r12
-; FALLBACK6-NEXT: popq %r13
; FALLBACK6-NEXT: popq %r14
; FALLBACK6-NEXT: popq %r15
-; FALLBACK6-NEXT: popq %rbp
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: shl_64bytes:
@@ -16798,80 +16762,75 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK10-LABEL: shl_64bytes:
; FALLBACK10: # %bb.0:
-; FALLBACK10-NEXT: pushq %rbp
; FALLBACK10-NEXT: pushq %r15
; FALLBACK10-NEXT: pushq %r14
-; FALLBACK10-NEXT: pushq %r13
; FALLBACK10-NEXT: pushq %r12
; FALLBACK10-NEXT: pushq %rbx
-; FALLBACK10-NEXT: subq $24, %rsp
+; FALLBACK10-NEXT: pushq %rax
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
; FALLBACK10-NEXT: vmovups 32(%rdi), %ymm1
-; FALLBACK10-NEXT: movl (%rsi), %eax
+; FALLBACK10-NEXT: movl (%rsi), %esi
; FALLBACK10-NEXT: vxorps %xmm2, %xmm2, %xmm2
; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm2, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm1, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: vmovups %ymm0, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: leal (,%rax,8), %ecx
-; FALLBACK10-NEXT: andl $56, %ecx
+; FALLBACK10-NEXT: leal (,%rsi,8), %eax
; FALLBACK10-NEXT: andl $56, %eax
-; FALLBACK10-NEXT: negl %eax
-; FALLBACK10-NEXT: movslq %eax, %rsi
-; FALLBACK10-NEXT: movq -8(%rsp,%rsi), %rax
-; FALLBACK10-NEXT: shlxq %rcx, %rax, %r12
-; FALLBACK10-NEXT: movq -16(%rsp,%rsi), %rdi
-; FALLBACK10-NEXT: shlxq %rcx, %rdi, %r15
-; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %r13
-; FALLBACK10-NEXT: shlxq %rcx, %r13, %r8
-; FALLBACK10-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %r11
-; FALLBACK10-NEXT: shlxq %rcx, %r11, %r10
-; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %r14
-; FALLBACK10-NEXT: shlxq %rcx, %r14, %rbx
-; FALLBACK10-NEXT: movl %ecx, %r9d
-; FALLBACK10-NEXT: notb %r9b
+; FALLBACK10-NEXT: movl %eax, %ecx
+; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: negl %esi
+; FALLBACK10-NEXT: movslq %esi, %rsi
+; FALLBACK10-NEXT: movq -24(%rsp,%rsi), %rdi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %r9
+; FALLBACK10-NEXT: notb %al
+; FALLBACK10-NEXT: movq -32(%rsp,%rsi), %r8
+; FALLBACK10-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK10-NEXT: shrq %r8
+; FALLBACK10-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK10-NEXT: orq %r9, %r8
+; FALLBACK10-NEXT: movq -40(%rsp,%rsi), %r9
+; FALLBACK10-NEXT: shlxq %rcx, %r9, %r11
+; FALLBACK10-NEXT: shrq %r9
+; FALLBACK10-NEXT: shrxq %rax, %r9, %r9
+; FALLBACK10-NEXT: orq %r10, %r9
+; FALLBACK10-NEXT: movq -48(%rsp,%rsi), %r10
+; FALLBACK10-NEXT: shlxq %rcx, %r10, %r14
+; FALLBACK10-NEXT: shrq %r10
+; FALLBACK10-NEXT: shrxq %rax, %r10, %r10
+; FALLBACK10-NEXT: orq %r11, %r10
+; FALLBACK10-NEXT: movq -64(%rsp,%rsi), %rbx
+; FALLBACK10-NEXT: movq -56(%rsp,%rsi), %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r15
+; FALLBACK10-NEXT: shrq %r11
+; FALLBACK10-NEXT: shrxq %rax, %r11, %r11
+; FALLBACK10-NEXT: orq %r14, %r11
+; FALLBACK10-NEXT: shlxq %rcx, %rbx, %r14
+; FALLBACK10-NEXT: shrq %rbx
+; FALLBACK10-NEXT: shrxq %rax, %rbx, %rbx
+; FALLBACK10-NEXT: orq %r15, %rbx
+; FALLBACK10-NEXT: movq -16(%rsp,%rsi), %r15
+; FALLBACK10-NEXT: shlxq %rcx, %r15, %r12
; FALLBACK10-NEXT: shrq %rdi
-; FALLBACK10-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK10-NEXT: shrxq %rax, %rdi, %rdi
; FALLBACK10-NEXT: orq %r12, %rdi
-; FALLBACK10-NEXT: movq (%rsp,%rsi), %rbp
-; FALLBACK10-NEXT: shlxq %rcx, %rbp, %r8
-; FALLBACK10-NEXT: shrq %r13
-; FALLBACK10-NEXT: shrxq %r9, %r13, %r12
-; FALLBACK10-NEXT: orq %r15, %r12
-; FALLBACK10-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
-; FALLBACK10-NEXT: movq -48(%rsp,%rsi), %rsi
-; FALLBACK10-NEXT: shlxq %rcx, %rsi, %rcx
-; FALLBACK10-NEXT: shrq %r11
-; FALLBACK10-NEXT: shrxq %r9, %r11, %r11
-; FALLBACK10-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; FALLBACK10-NEXT: shrq %r14
-; FALLBACK10-NEXT: shrxq %r9, %r14, %r14
-; FALLBACK10-NEXT: orq %r10, %r14
-; FALLBACK10-NEXT: shrq %rsi
-; FALLBACK10-NEXT: shrxq %r9, %rsi, %rsi
-; FALLBACK10-NEXT: orq %rbx, %rsi
-; FALLBACK10-NEXT: shrq %rax
-; FALLBACK10-NEXT: shrxq %r9, %rax, %rax
-; FALLBACK10-NEXT: orq %r8, %rax
-; FALLBACK10-NEXT: shrq %rbp
-; FALLBACK10-NEXT: shrxq %r9, %rbp, %r8
-; FALLBACK10-NEXT: orq %r15, %r8
-; FALLBACK10-NEXT: movq %rcx, (%rdx)
-; FALLBACK10-NEXT: movq %r8, 56(%rdx)
-; FALLBACK10-NEXT: movq %rax, 48(%rdx)
-; FALLBACK10-NEXT: movq %rsi, 8(%rdx)
-; FALLBACK10-NEXT: movq %r14, 16(%rdx)
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %r12, 32(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 40(%rdx)
-; FALLBACK10-NEXT: addq $24, %rsp
+; FALLBACK10-NEXT: shlxq %rcx, -8(%rsp,%rsi), %rcx
+; FALLBACK10-NEXT: shrq %r15
+; FALLBACK10-NEXT: shrxq %rax, %r15, %rax
+; FALLBACK10-NEXT: orq %rcx, %rax
+; FALLBACK10-NEXT: movq %r14, (%rdx)
+; FALLBACK10-NEXT: movq %rax, 56(%rdx)
+; FALLBACK10-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK10-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r10, 24(%rdx)
+; FALLBACK10-NEXT: movq %r9, 32(%rdx)
+; FALLBACK10-NEXT: movq %r8, 40(%rdx)
+; FALLBACK10-NEXT: addq $8, %rsp
; FALLBACK10-NEXT: popq %rbx
; FALLBACK10-NEXT: popq %r12
-; FALLBACK10-NEXT: popq %r13
; FALLBACK10-NEXT: popq %r14
; FALLBACK10-NEXT: popq %r15
-; FALLBACK10-NEXT: popq %rbp
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -17071,77 +17030,72 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK14-LABEL: shl_64bytes:
; FALLBACK14: # %bb.0:
-; FALLBACK14-NEXT: pushq %rbp
; FALLBACK14-NEXT: pushq %r15
; FALLBACK14-NEXT: pushq %r14
-; FALLBACK14-NEXT: pushq %r13
; FALLBACK14-NEXT: pushq %r12
; FALLBACK14-NEXT: pushq %rbx
-; FALLBACK14-NEXT: subq $24, %rsp
+; FALLBACK14-NEXT: pushq %rax
; FALLBACK14-NEXT: vmovups (%rdi), %zmm0
-; FALLBACK14-NEXT: movl (%rsi), %eax
+; FALLBACK14-NEXT: movl (%rsi), %esi
; FALLBACK14-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK14-NEXT: vmovups %zmm1, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: vmovups %zmm0, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: leal (,%rax,8), %ecx
-; FALLBACK14-NEXT: andl $56, %ecx
+; FALLBACK14-NEXT: leal (,%rsi,8), %eax
; FALLBACK14-NEXT: andl $56, %eax
-; FALLBACK14-NEXT: negl %eax
-; FALLBACK14-NEXT: movslq %eax, %rsi
-; FALLBACK14-NEXT: movq -8(%rsp,%rsi), %rax
-; FALLBACK14-NEXT: shlxq %rcx, %rax, %r12
-; FALLBACK14-NEXT: movq -16(%rsp,%rsi), %rdi
-; FALLBACK14-NEXT: shlxq %rcx, %rdi, %r15
-; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %r13
-; FALLBACK14-NEXT: shlxq %rcx, %r13, %r8
-; FALLBACK14-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %r11
-; FALLBACK14-NEXT: shlxq %rcx, %r11, %r10
-; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %r14
-; FALLBACK14-NEXT: shlxq %rcx, %r14, %rbx
-; FALLBACK14-NEXT: movl %ecx, %r9d
-; FALLBACK14-NEXT: notb %r9b
+; FALLBACK14-NEXT: movl %eax, %ecx
+; FALLBACK14-NEXT: andl $56, %esi
+; FALLBACK14-NEXT: negl %esi
+; FALLBACK14-NEXT: movslq %esi, %rsi
+; FALLBACK14-NEXT: movq -24(%rsp,%rsi), %rdi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %r9
+; FALLBACK14-NEXT: notb %al
+; FALLBACK14-NEXT: movq -32(%rsp,%rsi), %r8
+; FALLBACK14-NEXT: shlxq %rcx, %r8, %r10
+; FALLBACK14-NEXT: shrq %r8
+; FALLBACK14-NEXT: shrxq %rax, %r8, %r8
+; FALLBACK14-NEXT: orq %r9, %r8
+; FALLBACK14-NEXT: movq -40(%rsp,%rsi), %r9
+; FALLBACK14-NEXT: shlxq %rcx, %r9, %r11
+; FALLBACK14-NEXT: shrq %r9
+; FALLBACK14-NEXT: shrxq %rax, %r9, %r9
+; FALLBACK14-NEXT: orq %r10, %r9
+; FALLBACK14-NEXT: movq -48(%rsp,%rsi), %r10
+; FALLBACK14-NEXT: shlxq %rcx, %r10, %r14
+; FALLBACK14-NEXT: shrq %r10
+; FALLBACK14-NEXT: shrxq %rax, %r10, %r10
+; FALLBACK14-NEXT: orq %r11, %r10
+; FALLBACK14-NEXT: movq -64(%rsp,%rsi), %rbx
+; FALLBACK14-NEXT: movq -56(%rsp,%rsi), %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r15
+; FALLBACK14-NEXT: shrq %r11
+; FALLBACK14-NEXT: shrxq %rax, %r11, %r11
+; FALLBACK14-NEXT: orq %r14, %r11
+; FALLBACK14-NEXT: shlxq %rcx, %rbx, %r14
+; FALLBACK14-NEXT: shrq %rbx
+; FALLBACK14-NEXT: shrxq %rax, %rbx, %rbx
+; FALLBACK14-NEXT: orq %r15, %rbx
+; FALLBACK14-NEXT: movq -16(%rsp,%rsi), %r15
+; FALLBACK14-NEXT: shlxq %rcx, %r15, %r12
; FALLBACK14-NEXT: shrq %rdi
-; FALLBACK14-NEXT: shrxq %r9, %rdi, %rdi
+; FALLBACK14-NEXT: shrxq %rax, %rdi, %rdi
; FALLBACK14-NEXT: orq %r12, %rdi
-; FALLBACK14-NEXT: movq (%rsp,%rsi), %rbp
-; FALLBACK14-NEXT: shlxq %rcx, %rbp, %r8
-; FALLBACK14-NEXT: shrq %r13
-; FALLBACK14-NEXT: shrxq %r9, %r13, %r12
-; FALLBACK14-NEXT: orq %r15, %r12
-; FALLBACK14-NEXT: shlxq %rcx, 8(%rsp,%rsi), %r15
-; FALLBACK14-NEXT: movq -48(%rsp,%rsi), %rsi
-; FALLBACK14-NEXT: shlxq %rcx, %rsi, %rcx
-; FALLBACK14-NEXT: shrq %r11
-; FALLBACK14-NEXT: shrxq %r9, %r11, %r11
-; FALLBACK14-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
-; FALLBACK14-NEXT: shrq %r14
-; FALLBACK14-NEXT: shrxq %r9, %r14, %r14
-; FALLBACK14-NEXT: orq %r10, %r14
-; FALLBACK14-NEXT: shrq %rsi
-; FALLBACK14-NEXT: shrxq %r9, %rsi, %rsi
-; FALLBACK14-NEXT: orq %rbx, %rsi
-; FALLBACK14-NEXT: shrq %rax
-; FALLBACK14-NEXT: shrxq %r9, %rax, %rax
-; FALLBACK14-NEXT: orq %r8, %rax
-; FALLBACK14-NEXT: shrq %rbp
-; FALLBACK14-NEXT: shrxq %r9, %rbp, %r8
-; FALLBACK14-NEXT: orq %r15, %r8
-; FALLBACK14-NEXT: movq %rcx, (%rdx)
-; FALLBACK14-NEXT: movq %r8, 56(%rdx)
-; FALLBACK14-NEXT: movq %rax, 48(%rdx)
-; FALLBACK14-NEXT: movq %rsi, 8(%rdx)
-; FALLBACK14-NEXT: movq %r14, 16(%rdx)
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %r12, 32(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 40(%rdx)
-; FALLBACK14-NEXT: addq $24, %rsp
+; FALLBACK14-NEXT: shlxq %rcx, -8(%rsp,%rsi), %rcx
+; FALLBACK14-NEXT: shrq %r15
+; FALLBACK14-NEXT: shrxq %rax, %r15, %rax
+; FALLBACK14-NEXT: orq %rcx, %rax
+; FALLBACK14-NEXT: movq %r14, (%rdx)
+; FALLBACK14-NEXT: movq %rax, 56(%rdx)
+; FALLBACK14-NEXT: movq %rdi, 48(%rdx)
+; FALLBACK14-NEXT: movq %rbx, 8(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r10, 24(%rdx)
+; FALLBACK14-NEXT: movq %r9, 32(%rdx)
+; FALLBACK14-NEXT: movq %r8, 40(%rdx)
+; FALLBACK14-NEXT: addq $8, %rsp
; FALLBACK14-NEXT: popq %rbx
; FALLBACK14-NEXT: popq %r12
-; FALLBACK14-NEXT: popq %r13
; FALLBACK14-NEXT: popq %r14
; FALLBACK14-NEXT: popq %r15
-; FALLBACK14-NEXT: popq %rbp
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -17681,144 +17635,149 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; FALLBACK18-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; FALLBACK18-NEXT: leal (,%ebp,8), %edx
-; FALLBACK18-NEXT: andl $24, %edx
+; FALLBACK18-NEXT: leal (,%ebp,8), %ebx
+; FALLBACK18-NEXT: andl $24, %ebx
+; FALLBACK18-NEXT: movl %ebx, %eax
; FALLBACK18-NEXT: andl $60, %ebp
; FALLBACK18-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: leal {{[0-9]+}}(%esp), %edi
-; FALLBACK18-NEXT: subl %ebp, %edi
-; FALLBACK18-NEXT: movl (%edi), %ecx
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 4(%edi), %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl %edx, %ebx
+; FALLBACK18-NEXT: leal {{[0-9]+}}(%esp), %edx
+; FALLBACK18-NEXT: subl %ebp, %edx
+; FALLBACK18-NEXT: movl (%edx), %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 4(%edx), %ecx
; FALLBACK18-NEXT: notb %bl
-; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %ebx, %ecx, %esi
-; FALLBACK18-NEXT: shlxl %edx, %eax, %ecx
-; FALLBACK18-NEXT: orl %ecx, %esi
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK18-NEXT: shlxl %eax, %ecx, %esi
+; FALLBACK18-NEXT: movl %eax, %ebp
+; FALLBACK18-NEXT: orl %esi, %edi
+; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 8(%edx), %esi
; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 8(%edi), %esi
-; FALLBACK18-NEXT: movl %esi, %ecx
-; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK18-NEXT: movl 12(%edi), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
-; FALLBACK18-NEXT: orl %ebp, %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK18-NEXT: orl %esi, %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 16(%edi), %eax
-; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK18-NEXT: movl 20(%edi), %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: movl 12(%edx), %esi
+; FALLBACK18-NEXT: movl %ebp, %edi
+; FALLBACK18-NEXT: shlxl %ebp, %esi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: shrl %ecx
; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ecx
; FALLBACK18-NEXT: orl %eax, %ecx
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 24(%edi), %ecx
+; FALLBACK18-NEXT: movl 16(%edx), %ecx
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: shrl %ecx
; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK18-NEXT: movl 28(%edi), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK18-NEXT: movl 20(%edx), %ecx
+; FALLBACK18-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK18-NEXT: shrl %esi
-; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %eax, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 32(%edi), %eax
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK18-NEXT: movl 36(%edi), %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK18-NEXT: movl 24(%edx), %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shrl %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: movl 28(%edx), %esi
+; FALLBACK18-NEXT: shlxl %edi, %esi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK18-NEXT: orl %eax, %ecx
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 40(%edi), %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 32(%edx), %ecx
; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: shrl %ecx
; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK18-NEXT: movl 44(%edi), %ecx
-; FALLBACK18-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK18-NEXT: movl 36(%edx), %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl %edi, %eax
; FALLBACK18-NEXT: shrl %esi
; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %eax, %esi
-; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl 48(%edi), %esi
+; FALLBACK18-NEXT: orl %ebp, %esi
; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 40(%edx), %edi
+; FALLBACK18-NEXT: movl %edi, %esi
; FALLBACK18-NEXT: shrl %esi
-; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
-; FALLBACK18-NEXT: movl 52(%edi), %esi
-; FALLBACK18-NEXT: shlxl %edx, %esi, %ebp
-; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %ecx
+; FALLBACK18-NEXT: movl 44(%edx), %esi
+; FALLBACK18-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %eax, %esi, %ebp
+; FALLBACK18-NEXT: orl %ebp, %ecx
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK18-NEXT: movl %eax, %esi
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: shrl %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrl %ecx
-; FALLBACK18-NEXT: shrxl %ebx, %ecx, %ebp
-; FALLBACK18-NEXT: orl %eax, %ebp
-; FALLBACK18-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl 48(%edx), %ebp
+; FALLBACK18-NEXT: movl %ebp, %edi
+; FALLBACK18-NEXT: shrl %edi
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: movl 52(%edx), %ecx
+; FALLBACK18-NEXT: shlxl %esi, %ecx, %edi
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %esi, %ebp, %edi
+; FALLBACK18-NEXT: movl %esi, %ebp
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK18-NEXT: negl %eax
-; FALLBACK18-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
-; FALLBACK18-NEXT: movl 56(%edi), %eax
-; FALLBACK18-NEXT: shlxl %edx, %eax, %edx
-; FALLBACK18-NEXT: shrl %esi
-; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %edx, %esi
; FALLBACK18-NEXT: shrl %eax
-; FALLBACK18-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK18-NEXT: orl %eax, %ecx
-; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK18-NEXT: movl %edx, (%eax)
-; FALLBACK18-NEXT: movl %esi, 56(%eax)
-; FALLBACK18-NEXT: movl %ecx, 60(%eax)
-; FALLBACK18-NEXT: movl %ebp, 48(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 52(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 40(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 44(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 32(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 36(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 24(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 28(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 16(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 20(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 8(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 12(%eax)
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK18-NEXT: movl %ecx, 4(%eax)
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK18-NEXT: orl %edi, %esi
+; FALLBACK18-NEXT: movl 56(%edx), %edi
+; FALLBACK18-NEXT: shrl %ecx
+; FALLBACK18-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK18-NEXT: shlxl %ebp, %edi, %ecx
+; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: shrl %edi
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ecx
+; FALLBACK18-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK18-NEXT: negl %ebx
+; FALLBACK18-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; FALLBACK18-NEXT: orl %ecx, %ebx
+; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK18-NEXT: movl %edi, (%edx)
+; FALLBACK18-NEXT: movl %eax, 56(%edx)
+; FALLBACK18-NEXT: movl %ebx, 60(%edx)
+; FALLBACK18-NEXT: movl %esi, 48(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 52(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 40(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 44(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 32(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 36(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 24(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 28(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 16(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 20(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 8(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 12(%edx)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl %eax, 4(%edx)
; FALLBACK18-NEXT: addl $204, %esp
; FALLBACK18-NEXT: popl %esi
; FALLBACK18-NEXT: popl %edi
@@ -18342,144 +18301,150 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movaps %xmm2, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; FALLBACK22-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; FALLBACK22-NEXT: leal (,%eax,8), %edx
-; FALLBACK22-NEXT: andl $24, %edx
+; FALLBACK22-NEXT: leal (,%eax,8), %ebx
+; FALLBACK22-NEXT: andl $24, %ebx
+; FALLBACK22-NEXT: movl %ebx, %ecx
; FALLBACK22-NEXT: andl $60, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: leal {{[0-9]+}}(%esp), %edi
-; FALLBACK22-NEXT: subl %eax, %edi
-; FALLBACK22-NEXT: movl (%edi), %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 4(%edi), %eax
+; FALLBACK22-NEXT: leal {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: subl %eax, %edx
+; FALLBACK22-NEXT: movl (%edx), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 4(%edx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl %edx, %ebx
; FALLBACK22-NEXT: notb %bl
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %esi
-; FALLBACK22-NEXT: shlxl %edx, %eax, %ecx
-; FALLBACK22-NEXT: orl %ecx, %esi
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK22-NEXT: shlxl %ecx, %eax, %esi
+; FALLBACK22-NEXT: orl %esi, %edi
+; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 8(%edx), %esi
; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 8(%edi), %esi
-; FALLBACK22-NEXT: movl %esi, %ecx
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK22-NEXT: movl 12(%edi), %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
-; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: orl %esi, %eax
-; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 16(%edi), %eax
-; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: movl 20(%edi), %esi
-; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: movl 12(%edx), %esi
+; FALLBACK22-NEXT: shlxl %ecx, %esi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %ecx, %edi
+; FALLBACK22-NEXT: shlxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK22-NEXT: shrl %ecx
; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
; FALLBACK22-NEXT: orl %eax, %ecx
; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 24(%edi), %ecx
+; FALLBACK22-NEXT: movl 16(%edx), %ecx
; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: shrl %ecx
; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK22-NEXT: movl 28(%edi), %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK22-NEXT: movl 20(%edx), %ecx
+; FALLBACK22-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %eax, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 32(%edi), %eax
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: movl 36(%edi), %esi
-; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK22-NEXT: movl 24(%edx), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shrl %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: movl 28(%edx), %esi
+; FALLBACK22-NEXT: shlxl %edi, %esi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: orl %eax, %ecx
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 40(%edi), %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 32(%edx), %ecx
; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: shrl %ecx
; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK22-NEXT: movl 44(%edi), %ecx
-; FALLBACK22-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK22-NEXT: movl 36(%edx), %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl %edi, %eax
; FALLBACK22-NEXT: shrl %esi
; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %eax, %esi
-; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl 48(%edi), %esi
+; FALLBACK22-NEXT: orl %ebp, %esi
; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 40(%edx), %edi
+; FALLBACK22-NEXT: movl %edi, %esi
; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
-; FALLBACK22-NEXT: movl 52(%edi), %esi
-; FALLBACK22-NEXT: shlxl %edx, %esi, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %ecx
+; FALLBACK22-NEXT: movl 44(%edx), %esi
+; FALLBACK22-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %eax, %esi, %ebp
+; FALLBACK22-NEXT: orl %ebp, %ecx
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK22-NEXT: movl %eax, %esi
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: shrl %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK22-NEXT: orl %edi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK22-NEXT: shrl %ecx
-; FALLBACK22-NEXT: shrxl %ebx, %ecx, %ebp
-; FALLBACK22-NEXT: orl %eax, %ebp
-; FALLBACK22-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl 48(%edx), %ebp
+; FALLBACK22-NEXT: movl %ebp, %edi
+; FALLBACK22-NEXT: shrl %edi
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: movl 52(%edx), %ecx
+; FALLBACK22-NEXT: shlxl %esi, %ecx, %edi
+; FALLBACK22-NEXT: orl %edi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %esi, %ebp, %edi
+; FALLBACK22-NEXT: movl %esi, %ebp
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK22-NEXT: negl %eax
-; FALLBACK22-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
-; FALLBACK22-NEXT: movl 56(%edi), %eax
-; FALLBACK22-NEXT: shlxl %edx, %eax, %edx
-; FALLBACK22-NEXT: shrl %esi
-; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %edx, %esi
; FALLBACK22-NEXT: shrl %eax
-; FALLBACK22-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK22-NEXT: orl %eax, %ecx
-; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK22-NEXT: movl %edx, (%eax)
-; FALLBACK22-NEXT: movl %esi, 56(%eax)
-; FALLBACK22-NEXT: movl %ecx, 60(%eax)
-; FALLBACK22-NEXT: movl %ebp, 48(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 52(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 40(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 44(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 32(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 36(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 24(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 28(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 16(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 20(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 8(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 12(%eax)
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK22-NEXT: movl %ecx, 4(%eax)
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK22-NEXT: orl %edi, %esi
+; FALLBACK22-NEXT: movl 56(%edx), %edi
+; FALLBACK22-NEXT: shrl %ecx
+; FALLBACK22-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK22-NEXT: shlxl %ebp, %edi, %ecx
+; FALLBACK22-NEXT: orl %ecx, %eax
+; FALLBACK22-NEXT: shrl %edi
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ecx
+; FALLBACK22-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK22-NEXT: negl %ebx
+; FALLBACK22-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; FALLBACK22-NEXT: orl %ecx, %ebx
+; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK22-NEXT: movl %edi, (%edx)
+; FALLBACK22-NEXT: movl %eax, 56(%edx)
+; FALLBACK22-NEXT: movl %ebx, 60(%edx)
+; FALLBACK22-NEXT: movl %esi, 48(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 52(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 40(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 44(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 32(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 36(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 24(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 28(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 16(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 20(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 8(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 12(%edx)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl %eax, 4(%edx)
; FALLBACK22-NEXT: addl $204, %esp
; FALLBACK22-NEXT: popl %esi
; FALLBACK22-NEXT: popl %edi
@@ -18943,144 +18908,150 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: vmovups %ymm2, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovups %ymm1, {{[0-9]+}}(%esp)
; FALLBACK26-NEXT: vmovups %ymm0, {{[0-9]+}}(%esp)
-; FALLBACK26-NEXT: leal (,%eax,8), %edx
-; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: leal (,%eax,8), %ebx
+; FALLBACK26-NEXT: andl $24, %ebx
+; FALLBACK26-NEXT: movl %ebx, %ecx
; FALLBACK26-NEXT: andl $60, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: leal {{[0-9]+}}(%esp), %edi
-; FALLBACK26-NEXT: subl %eax, %edi
-; FALLBACK26-NEXT: movl (%edi), %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 4(%edi), %eax
+; FALLBACK26-NEXT: leal {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: subl %eax, %edx
+; FALLBACK26-NEXT: movl (%edx), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 4(%edx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl %edx, %ebx
; FALLBACK26-NEXT: notb %bl
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %esi
-; FALLBACK26-NEXT: shlxl %edx, %eax, %ecx
-; FALLBACK26-NEXT: orl %ecx, %esi
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK26-NEXT: shlxl %ecx, %eax, %esi
+; FALLBACK26-NEXT: orl %esi, %edi
+; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 8(%edx), %esi
; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 8(%edi), %esi
-; FALLBACK26-NEXT: movl %esi, %ecx
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK26-NEXT: movl 12(%edi), %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
-; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: orl %esi, %eax
-; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 16(%edi), %eax
-; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: movl 20(%edi), %esi
-; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: movl 12(%edx), %esi
+; FALLBACK26-NEXT: shlxl %ecx, %esi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %ecx, %edi
+; FALLBACK26-NEXT: shlxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK26-NEXT: shrl %ecx
; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
; FALLBACK26-NEXT: orl %eax, %ecx
; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 24(%edi), %ecx
+; FALLBACK26-NEXT: movl 16(%edx), %ecx
; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: shrl %ecx
; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK26-NEXT: movl 28(%edi), %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK26-NEXT: movl 20(%edx), %ecx
+; FALLBACK26-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %eax, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 32(%edi), %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: movl 36(%edi), %esi
-; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK26-NEXT: movl 24(%edx), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shrl %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: movl 28(%edx), %esi
+; FALLBACK26-NEXT: shlxl %edi, %esi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK26-NEXT: orl %eax, %ecx
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 40(%edi), %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 32(%edx), %ecx
; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: shrl %ecx
; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK26-NEXT: movl 44(%edi), %ecx
-; FALLBACK26-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK26-NEXT: movl 36(%edx), %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl %edi, %eax
; FALLBACK26-NEXT: shrl %esi
; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %eax, %esi
-; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl 48(%edi), %esi
+; FALLBACK26-NEXT: orl %ebp, %esi
; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 40(%edx), %edi
+; FALLBACK26-NEXT: movl %edi, %esi
; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
-; FALLBACK26-NEXT: movl 52(%edi), %esi
-; FALLBACK26-NEXT: shlxl %edx, %esi, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %ecx
+; FALLBACK26-NEXT: movl 44(%edx), %esi
+; FALLBACK26-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %eax, %esi, %ebp
+; FALLBACK26-NEXT: orl %ebp, %ecx
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK26-NEXT: movl %eax, %esi
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: shrl %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK26-NEXT: shrl %ecx
-; FALLBACK26-NEXT: shrxl %ebx, %ecx, %ebp
-; FALLBACK26-NEXT: orl %eax, %ebp
-; FALLBACK26-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl 48(%edx), %ebp
+; FALLBACK26-NEXT: movl %ebp, %edi
+; FALLBACK26-NEXT: shrl %edi
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: movl 52(%edx), %ecx
+; FALLBACK26-NEXT: shlxl %esi, %ecx, %edi
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %esi, %ebp, %edi
+; FALLBACK26-NEXT: movl %esi, %ebp
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK26-NEXT: negl %eax
-; FALLBACK26-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
-; FALLBACK26-NEXT: movl 56(%edi), %eax
-; FALLBACK26-NEXT: shlxl %edx, %eax, %edx
-; FALLBACK26-NEXT: shrl %esi
-; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %edx, %esi
; FALLBACK26-NEXT: shrl %eax
-; FALLBACK26-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK26-NEXT: orl %eax, %ecx
-; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK26-NEXT: movl %edx, (%eax)
-; FALLBACK26-NEXT: movl %esi, 56(%eax)
-; FALLBACK26-NEXT: movl %ecx, 60(%eax)
-; FALLBACK26-NEXT: movl %ebp, 48(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 52(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 40(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 44(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 32(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 36(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 24(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 28(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 16(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 20(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 8(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 12(%eax)
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK26-NEXT: movl %ecx, 4(%eax)
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK26-NEXT: orl %edi, %esi
+; FALLBACK26-NEXT: movl 56(%edx), %edi
+; FALLBACK26-NEXT: shrl %ecx
+; FALLBACK26-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK26-NEXT: shlxl %ebp, %edi, %ecx
+; FALLBACK26-NEXT: orl %ecx, %eax
+; FALLBACK26-NEXT: shrl %edi
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ecx
+; FALLBACK26-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK26-NEXT: negl %ebx
+; FALLBACK26-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; FALLBACK26-NEXT: orl %ecx, %ebx
+; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK26-NEXT: movl %edi, (%edx)
+; FALLBACK26-NEXT: movl %eax, 56(%edx)
+; FALLBACK26-NEXT: movl %ebx, 60(%edx)
+; FALLBACK26-NEXT: movl %esi, 48(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 52(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 40(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 44(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 32(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 36(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 24(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 28(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 16(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 20(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 8(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 12(%edx)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl %eax, 4(%edx)
; FALLBACK26-NEXT: addl $204, %esp
; FALLBACK26-NEXT: popl %esi
; FALLBACK26-NEXT: popl %edi
@@ -19531,144 +19502,150 @@ define void @shl_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: vxorps %xmm1, %xmm1, %xmm1
; FALLBACK30-NEXT: vmovups %zmm1, {{[0-9]+}}(%esp)
; FALLBACK30-NEXT: vmovups %zmm0, {{[0-9]+}}(%esp)
-; FALLBACK30-NEXT: leal (,%eax,8), %edx
-; FALLBACK30-NEXT: andl $24, %edx
+; FALLBACK30-NEXT: leal (,%eax,8), %ebx
+; FALLBACK30-NEXT: andl $24, %ebx
+; FALLBACK30-NEXT: movl %ebx, %ecx
; FALLBACK30-NEXT: andl $60, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: leal {{[0-9]+}}(%esp), %edi
-; FALLBACK30-NEXT: subl %eax, %edi
-; FALLBACK30-NEXT: movl (%edi), %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 4(%edi), %eax
+; FALLBACK30-NEXT: leal {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: subl %eax, %edx
+; FALLBACK30-NEXT: movl (%edx), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 4(%edx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl %edx, %ebx
; FALLBACK30-NEXT: notb %bl
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %esi
-; FALLBACK30-NEXT: shlxl %edx, %eax, %ecx
-; FALLBACK30-NEXT: orl %ecx, %esi
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK30-NEXT: shlxl %ecx, %eax, %esi
+; FALLBACK30-NEXT: orl %esi, %edi
+; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 8(%edx), %esi
; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 8(%edi), %esi
-; FALLBACK30-NEXT: movl %esi, %ecx
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK30-NEXT: movl 12(%edi), %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
-; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, %esi, %esi
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: orl %esi, %eax
-; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 16(%edi), %eax
-; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: movl 20(%edi), %esi
-; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: movl 12(%edx), %esi
+; FALLBACK30-NEXT: shlxl %ecx, %esi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %ecx, %edi
+; FALLBACK30-NEXT: shlxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK30-NEXT: shrl %ecx
; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
; FALLBACK30-NEXT: orl %eax, %ecx
; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 24(%edi), %ecx
+; FALLBACK30-NEXT: movl 16(%edx), %ecx
; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: shrl %ecx
; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK30-NEXT: movl 28(%edi), %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK30-NEXT: movl 20(%edx), %ecx
+; FALLBACK30-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %eax, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 32(%edi), %eax
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: movl 36(%edi), %esi
-; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
+; FALLBACK30-NEXT: movl 24(%edx), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shrl %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: movl 28(%edx), %esi
+; FALLBACK30-NEXT: shlxl %edi, %esi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ecx
-; FALLBACK30-NEXT: orl %eax, %ecx
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 40(%edi), %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 32(%edx), %ecx
; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: shrl %ecx
; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
-; FALLBACK30-NEXT: movl 44(%edi), %ecx
-; FALLBACK30-NEXT: shlxl %edx, %ecx, %ebp
+; FALLBACK30-NEXT: movl 36(%edx), %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edi, %ecx, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl %edi, %eax
; FALLBACK30-NEXT: shrl %esi
; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %eax, %esi
-; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl 48(%edi), %esi
+; FALLBACK30-NEXT: orl %ebp, %esi
; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 40(%edx), %edi
+; FALLBACK30-NEXT: movl %edi, %esi
; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
-; FALLBACK30-NEXT: movl 52(%edi), %esi
-; FALLBACK30-NEXT: shlxl %edx, %esi, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %ecx
+; FALLBACK30-NEXT: movl 44(%edx), %esi
+; FALLBACK30-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %eax, %esi, %ebp
+; FALLBACK30-NEXT: orl %ebp, %ecx
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %eax, %edi, %edi
+; FALLBACK30-NEXT: movl %eax, %esi
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: shrl %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
+; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
-; FALLBACK30-NEXT: shrl %ecx
-; FALLBACK30-NEXT: shrxl %ebx, %ecx, %ebp
-; FALLBACK30-NEXT: orl %eax, %ebp
-; FALLBACK30-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl 48(%edx), %ebp
+; FALLBACK30-NEXT: movl %ebp, %edi
+; FALLBACK30-NEXT: shrl %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: movl 52(%edx), %ecx
+; FALLBACK30-NEXT: shlxl %esi, %ecx, %edi
+; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %esi, %ebp, %edi
+; FALLBACK30-NEXT: movl %esi, %ebp
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; FALLBACK30-NEXT: negl %eax
-; FALLBACK30-NEXT: shlxl %edx, 188(%esp,%eax), %ecx
-; FALLBACK30-NEXT: movl 56(%edi), %eax
-; FALLBACK30-NEXT: shlxl %edx, %eax, %edx
-; FALLBACK30-NEXT: shrl %esi
-; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %edx, %esi
; FALLBACK30-NEXT: shrl %eax
-; FALLBACK30-NEXT: shrxl %ebx, %eax, %eax
-; FALLBACK30-NEXT: orl %eax, %ecx
-; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; FALLBACK30-NEXT: movl %edx, (%eax)
-; FALLBACK30-NEXT: movl %esi, 56(%eax)
-; FALLBACK30-NEXT: movl %ecx, 60(%eax)
-; FALLBACK30-NEXT: movl %ebp, 48(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 52(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 40(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 44(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 32(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 36(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 24(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 28(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 16(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 20(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 8(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 12(%eax)
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; FALLBACK30-NEXT: movl %ecx, 4(%eax)
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %esi
+; FALLBACK30-NEXT: orl %edi, %esi
+; FALLBACK30-NEXT: movl 56(%edx), %edi
+; FALLBACK30-NEXT: shrl %ecx
+; FALLBACK30-NEXT: shrxl %ebx, %ecx, %eax
+; FALLBACK30-NEXT: shlxl %ebp, %edi, %ecx
+; FALLBACK30-NEXT: orl %ecx, %eax
+; FALLBACK30-NEXT: shrl %edi
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ecx
+; FALLBACK30-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; FALLBACK30-NEXT: negl %ebx
+; FALLBACK30-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; FALLBACK30-NEXT: orl %ecx, %ebx
+; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %edx
+; FALLBACK30-NEXT: movl %edi, (%edx)
+; FALLBACK30-NEXT: movl %eax, 56(%edx)
+; FALLBACK30-NEXT: movl %ebx, 60(%edx)
+; FALLBACK30-NEXT: movl %esi, 48(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 52(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 40(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 44(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 32(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 36(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 24(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 28(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 16(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 20(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 8(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 12(%edx)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl %eax, 4(%edx)
; FALLBACK30-NEXT: addl $204, %esp
; FALLBACK30-NEXT: popl %esi
; FALLBACK30-NEXT: popl %edi
@@ -20336,10 +20313,8 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK2-LABEL: ashr_64bytes:
; FALLBACK2: # %bb.0:
-; FALLBACK2-NEXT: pushq %rbp
; FALLBACK2-NEXT: pushq %r15
; FALLBACK2-NEXT: pushq %r14
-; FALLBACK2-NEXT: pushq %r13
; FALLBACK2-NEXT: pushq %r12
; FALLBACK2-NEXT: pushq %rbx
; FALLBACK2-NEXT: pushq %rax
@@ -20371,60 +20346,58 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK2-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK2-NEXT: leal (,%rax,8), %ecx
; FALLBACK2-NEXT: andl $56, %ecx
+; FALLBACK2-NEXT: movl %ecx, %esi
; FALLBACK2-NEXT: andl $56, %eax
-; FALLBACK2-NEXT: movq -120(%rsp,%rax), %rdi
-; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r9
-; FALLBACK2-NEXT: shrxq %rcx, %rdi, %rbx
-; FALLBACK2-NEXT: shrxq %rcx, -128(%rsp,%rax), %r13
-; FALLBACK2-NEXT: movq -104(%rsp,%rax), %rsi
-; FALLBACK2-NEXT: shrxq %rcx, %rsi, %r8
-; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r10
-; FALLBACK2-NEXT: shrxq %rcx, %r9, %r11
-; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r14
-; FALLBACK2-NEXT: shrxq %rcx, %r14, %r15
-; FALLBACK2-NEXT: shrxq %rcx, %r10, %rbp
-; FALLBACK2-NEXT: movl %ecx, %r12d
-; FALLBACK2-NEXT: notb %r12b
-; FALLBACK2-NEXT: addq %r9, %r9
-; FALLBACK2-NEXT: shlxq %r12, %r9, %r9
+; FALLBACK2-NEXT: movq -120(%rsp,%rax), %r8
+; FALLBACK2-NEXT: movq -112(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r8, %r9
+; FALLBACK2-NEXT: notb %cl
+; FALLBACK2-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK2-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK2-NEXT: orq %r9, %rdi
+; FALLBACK2-NEXT: shrxq %rsi, -128(%rsp,%rax), %r9
+; FALLBACK2-NEXT: addq %r8, %r8
+; FALLBACK2-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK2-NEXT: orq %r9, %r8
+; FALLBACK2-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK2-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK2-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK2-NEXT: leaq (%r14,%r14), %r9
+; FALLBACK2-NEXT: shlxq %rcx, %r9, %r9
; FALLBACK2-NEXT: orq %rbx, %r9
-; FALLBACK2-NEXT: addq %rdi, %rdi
-; FALLBACK2-NEXT: shlxq %r12, %rdi, %rdi
-; FALLBACK2-NEXT: orq %r13, %rdi
-; FALLBACK2-NEXT: movq -80(%rsp,%rax), %rbx
-; FALLBACK2-NEXT: shrxq %rcx, %rbx, %r13
-; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK2-NEXT: sarxq %rcx, %rax, %rcx
+; FALLBACK2-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK2-NEXT: addq %r11, %r11
+; FALLBACK2-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK2-NEXT: orq %r10, %r11
+; FALLBACK2-NEXT: movq -88(%rsp,%rax), %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r10, %rbx
+; FALLBACK2-NEXT: movq -80(%rsp,%rax), %r15
+; FALLBACK2-NEXT: leaq (%r15,%r15), %r12
+; FALLBACK2-NEXT: shlxq %rcx, %r12, %r12
+; FALLBACK2-NEXT: orq %rbx, %r12
+; FALLBACK2-NEXT: shrxq %rsi, %r14, %rbx
; FALLBACK2-NEXT: addq %r10, %r10
-; FALLBACK2-NEXT: shlxq %r12, %r10, %r10
-; FALLBACK2-NEXT: orq %r8, %r10
-; FALLBACK2-NEXT: addq %rsi, %rsi
-; FALLBACK2-NEXT: shlxq %r12, %rsi, %rsi
-; FALLBACK2-NEXT: orq %r11, %rsi
-; FALLBACK2-NEXT: leaq (%rbx,%rbx), %r8
-; FALLBACK2-NEXT: shlxq %r12, %r8, %r8
-; FALLBACK2-NEXT: orq %r15, %r8
-; FALLBACK2-NEXT: addq %r14, %r14
-; FALLBACK2-NEXT: shlxq %r12, %r14, %r11
-; FALLBACK2-NEXT: orq %rbp, %r11
-; FALLBACK2-NEXT: addq %rax, %rax
-; FALLBACK2-NEXT: shlxq %r12, %rax, %rax
-; FALLBACK2-NEXT: orq %r13, %rax
-; FALLBACK2-NEXT: movq %rcx, 56(%rdx)
-; FALLBACK2-NEXT: movq %rax, 48(%rdx)
-; FALLBACK2-NEXT: movq %r11, 32(%rdx)
-; FALLBACK2-NEXT: movq %r8, 40(%rdx)
-; FALLBACK2-NEXT: movq %rsi, 16(%rdx)
-; FALLBACK2-NEXT: movq %r10, 24(%rdx)
-; FALLBACK2-NEXT: movq %rdi, (%rdx)
-; FALLBACK2-NEXT: movq %r9, 8(%rdx)
+; FALLBACK2-NEXT: shlxq %rcx, %r10, %r10
+; FALLBACK2-NEXT: orq %rbx, %r10
+; FALLBACK2-NEXT: shrxq %rsi, %r15, %rbx
+; FALLBACK2-NEXT: movq -72(%rsp,%rax), %rax
+; FALLBACK2-NEXT: leaq (%rax,%rax), %r14
+; FALLBACK2-NEXT: shlxq %rcx, %r14, %rcx
+; FALLBACK2-NEXT: orq %rbx, %rcx
+; FALLBACK2-NEXT: sarxq %rsi, %rax, %rax
+; FALLBACK2-NEXT: movq %rax, 56(%rdx)
+; FALLBACK2-NEXT: movq %rcx, 48(%rdx)
+; FALLBACK2-NEXT: movq %r10, 32(%rdx)
+; FALLBACK2-NEXT: movq %r12, 40(%rdx)
+; FALLBACK2-NEXT: movq %r11, 16(%rdx)
+; FALLBACK2-NEXT: movq %r9, 24(%rdx)
+; FALLBACK2-NEXT: movq %r8, (%rdx)
+; FALLBACK2-NEXT: movq %rdi, 8(%rdx)
; FALLBACK2-NEXT: addq $8, %rsp
; FALLBACK2-NEXT: popq %rbx
; FALLBACK2-NEXT: popq %r12
-; FALLBACK2-NEXT: popq %r13
; FALLBACK2-NEXT: popq %r14
; FALLBACK2-NEXT: popq %r15
-; FALLBACK2-NEXT: popq %rbp
; FALLBACK2-NEXT: retq
;
; FALLBACK3-LABEL: ashr_64bytes:
@@ -20664,13 +20637,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK6-LABEL: ashr_64bytes:
; FALLBACK6: # %bb.0:
-; FALLBACK6-NEXT: pushq %rbp
; FALLBACK6-NEXT: pushq %r15
; FALLBACK6-NEXT: pushq %r14
; FALLBACK6-NEXT: pushq %r13
; FALLBACK6-NEXT: pushq %r12
; FALLBACK6-NEXT: pushq %rbx
-; FALLBACK6-NEXT: pushq %rax
; FALLBACK6-NEXT: movups (%rdi), %xmm0
; FALLBACK6-NEXT: movups 16(%rdi), %xmm1
; FALLBACK6-NEXT: movups 32(%rdi), %xmm2
@@ -20691,62 +20662,60 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK6-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; FALLBACK6-NEXT: leal (,%rax,8), %esi
-; FALLBACK6-NEXT: andl $56, %esi
+; FALLBACK6-NEXT: leal (,%rax,8), %ecx
+; FALLBACK6-NEXT: andl $56, %ecx
+; FALLBACK6-NEXT: movl %ecx, %esi
; FALLBACK6-NEXT: andl $56, %eax
-; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK6-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK6-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK6-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK6-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK6-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK6-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK6-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK6-NEXT: movl %esi, %ebx
-; FALLBACK6-NEXT: notb %bl
-; FALLBACK6-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK6-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK6-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK6-NEXT: orq %r11, %r8
-; FALLBACK6-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK6-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK6-NEXT: orq %r12, %r11
+; FALLBACK6-NEXT: shrxq %rsi, -128(%rsp,%rax), %r8
+; FALLBACK6-NEXT: notb %cl
+; FALLBACK6-NEXT: movq -120(%rsp,%rax), %r10
+; FALLBACK6-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK6-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK6-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK6-NEXT: orq %r8, %rdi
+; FALLBACK6-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK6-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK6-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK6-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK6-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK6-NEXT: orq %rbx, %r8
+; FALLBACK6-NEXT: shrxq %rsi, %r9, %rbx
+; FALLBACK6-NEXT: addq %r11, %r11
+; FALLBACK6-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK6-NEXT: orq %rbx, %r11
+; FALLBACK6-NEXT: movq -88(%rsp,%rax), %rbx
+; FALLBACK6-NEXT: shrxq %rsi, %rbx, %r15
; FALLBACK6-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK6-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK6-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK6-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK6-NEXT: shlxq %rcx, %r13, %r13
+; FALLBACK6-NEXT: orq %r15, %r13
+; FALLBACK6-NEXT: shrxq %rsi, %r14, %r14
+; FALLBACK6-NEXT: addq %rbx, %rbx
+; FALLBACK6-NEXT: shlxq %rcx, %rbx, %rbx
+; FALLBACK6-NEXT: orq %r14, %rbx
+; FALLBACK6-NEXT: shrxq %rsi, %r12, %r14
; FALLBACK6-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK6-NEXT: sarxq %rsi, %rax, %rsi
-; FALLBACK6-NEXT: addq %rdi, %rdi
-; FALLBACK6-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK6-NEXT: orq %r9, %rdi
-; FALLBACK6-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK6-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK6-NEXT: orq %r14, %r9
-; FALLBACK6-NEXT: addq %r10, %r10
-; FALLBACK6-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK6-NEXT: orq %r15, %r10
-; FALLBACK6-NEXT: addq %rax, %rax
-; FALLBACK6-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK6-NEXT: orq %r13, %rax
-; FALLBACK6-NEXT: addq %rcx, %rcx
-; FALLBACK6-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK6-NEXT: orq %rbp, %rcx
-; FALLBACK6-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK6-NEXT: leaq (%rax,%rax), %r15
+; FALLBACK6-NEXT: shlxq %rcx, %r15, %r15
+; FALLBACK6-NEXT: orq %r14, %r15
+; FALLBACK6-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK6-NEXT: addq %r9, %r9
+; FALLBACK6-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK6-NEXT: orq %r10, %rcx
+; FALLBACK6-NEXT: sarxq %rsi, %rax, %rax
+; FALLBACK6-NEXT: movq %rax, 56(%rdx)
; FALLBACK6-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK6-NEXT: movq %rax, 48(%rdx)
-; FALLBACK6-NEXT: movq %r10, 32(%rdx)
-; FALLBACK6-NEXT: movq %r9, 40(%rdx)
-; FALLBACK6-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK6-NEXT: movq %r11, 24(%rdx)
-; FALLBACK6-NEXT: movq %r8, (%rdx)
-; FALLBACK6-NEXT: addq $8, %rsp
+; FALLBACK6-NEXT: movq %r15, 48(%rdx)
+; FALLBACK6-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK6-NEXT: movq %r13, 40(%rdx)
+; FALLBACK6-NEXT: movq %r11, 16(%rdx)
+; FALLBACK6-NEXT: movq %r8, 24(%rdx)
+; FALLBACK6-NEXT: movq %rdi, (%rdx)
; FALLBACK6-NEXT: popq %rbx
; FALLBACK6-NEXT: popq %r12
; FALLBACK6-NEXT: popq %r13
; FALLBACK6-NEXT: popq %r14
; FALLBACK6-NEXT: popq %r15
-; FALLBACK6-NEXT: popq %rbp
; FALLBACK6-NEXT: retq
;
; FALLBACK7-LABEL: ashr_64bytes:
@@ -20979,13 +20948,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK10-LABEL: ashr_64bytes:
; FALLBACK10: # %bb.0:
-; FALLBACK10-NEXT: pushq %rbp
; FALLBACK10-NEXT: pushq %r15
; FALLBACK10-NEXT: pushq %r14
; FALLBACK10-NEXT: pushq %r13
; FALLBACK10-NEXT: pushq %r12
; FALLBACK10-NEXT: pushq %rbx
-; FALLBACK10-NEXT: pushq %rax
; FALLBACK10-NEXT: vmovups (%rdi), %ymm0
; FALLBACK10-NEXT: vmovups 32(%rdi), %xmm1
; FALLBACK10-NEXT: movq 48(%rdi), %rcx
@@ -21004,62 +20971,60 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK10-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; FALLBACK10-NEXT: leal (,%rax,8), %esi
-; FALLBACK10-NEXT: andl $56, %esi
+; FALLBACK10-NEXT: leal (,%rax,8), %ecx
+; FALLBACK10-NEXT: andl $56, %ecx
+; FALLBACK10-NEXT: movl %ecx, %esi
; FALLBACK10-NEXT: andl $56, %eax
-; FALLBACK10-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK10-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK10-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK10-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK10-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK10-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK10-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK10-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK10-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK10-NEXT: movl %esi, %ebx
-; FALLBACK10-NEXT: notb %bl
-; FALLBACK10-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK10-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK10-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK10-NEXT: orq %r11, %r8
-; FALLBACK10-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK10-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK10-NEXT: orq %r12, %r11
+; FALLBACK10-NEXT: shrxq %rsi, -128(%rsp,%rax), %r8
+; FALLBACK10-NEXT: notb %cl
+; FALLBACK10-NEXT: movq -120(%rsp,%rax), %r10
+; FALLBACK10-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK10-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK10-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK10-NEXT: orq %r8, %rdi
+; FALLBACK10-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK10-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK10-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK10-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK10-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK10-NEXT: orq %rbx, %r8
+; FALLBACK10-NEXT: shrxq %rsi, %r9, %rbx
+; FALLBACK10-NEXT: addq %r11, %r11
+; FALLBACK10-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK10-NEXT: orq %rbx, %r11
+; FALLBACK10-NEXT: movq -88(%rsp,%rax), %rbx
+; FALLBACK10-NEXT: shrxq %rsi, %rbx, %r15
; FALLBACK10-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK10-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK10-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK10-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK10-NEXT: shlxq %rcx, %r13, %r13
+; FALLBACK10-NEXT: orq %r15, %r13
+; FALLBACK10-NEXT: shrxq %rsi, %r14, %r14
+; FALLBACK10-NEXT: addq %rbx, %rbx
+; FALLBACK10-NEXT: shlxq %rcx, %rbx, %rbx
+; FALLBACK10-NEXT: orq %r14, %rbx
+; FALLBACK10-NEXT: shrxq %rsi, %r12, %r14
; FALLBACK10-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK10-NEXT: sarxq %rsi, %rax, %rsi
-; FALLBACK10-NEXT: addq %rdi, %rdi
-; FALLBACK10-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK10-NEXT: orq %r9, %rdi
-; FALLBACK10-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK10-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK10-NEXT: orq %r14, %r9
-; FALLBACK10-NEXT: addq %r10, %r10
-; FALLBACK10-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK10-NEXT: orq %r15, %r10
-; FALLBACK10-NEXT: addq %rax, %rax
-; FALLBACK10-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK10-NEXT: orq %r13, %rax
-; FALLBACK10-NEXT: addq %rcx, %rcx
-; FALLBACK10-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK10-NEXT: orq %rbp, %rcx
-; FALLBACK10-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK10-NEXT: leaq (%rax,%rax), %r15
+; FALLBACK10-NEXT: shlxq %rcx, %r15, %r15
+; FALLBACK10-NEXT: orq %r14, %r15
+; FALLBACK10-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK10-NEXT: addq %r9, %r9
+; FALLBACK10-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK10-NEXT: orq %r10, %rcx
+; FALLBACK10-NEXT: sarxq %rsi, %rax, %rax
+; FALLBACK10-NEXT: movq %rax, 56(%rdx)
; FALLBACK10-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK10-NEXT: movq %rax, 48(%rdx)
-; FALLBACK10-NEXT: movq %r10, 32(%rdx)
-; FALLBACK10-NEXT: movq %r9, 40(%rdx)
-; FALLBACK10-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK10-NEXT: movq %r11, 24(%rdx)
-; FALLBACK10-NEXT: movq %r8, (%rdx)
-; FALLBACK10-NEXT: addq $8, %rsp
+; FALLBACK10-NEXT: movq %r15, 48(%rdx)
+; FALLBACK10-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK10-NEXT: movq %r13, 40(%rdx)
+; FALLBACK10-NEXT: movq %r11, 16(%rdx)
+; FALLBACK10-NEXT: movq %r8, 24(%rdx)
+; FALLBACK10-NEXT: movq %rdi, (%rdx)
; FALLBACK10-NEXT: popq %rbx
; FALLBACK10-NEXT: popq %r12
; FALLBACK10-NEXT: popq %r13
; FALLBACK10-NEXT: popq %r14
; FALLBACK10-NEXT: popq %r15
-; FALLBACK10-NEXT: popq %rbp
; FALLBACK10-NEXT: vzeroupper
; FALLBACK10-NEXT: retq
;
@@ -21292,13 +21257,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
;
; FALLBACK14-LABEL: ashr_64bytes:
; FALLBACK14: # %bb.0:
-; FALLBACK14-NEXT: pushq %rbp
; FALLBACK14-NEXT: pushq %r15
; FALLBACK14-NEXT: pushq %r14
; FALLBACK14-NEXT: pushq %r13
; FALLBACK14-NEXT: pushq %r12
; FALLBACK14-NEXT: pushq %rbx
-; FALLBACK14-NEXT: pushq %rax
; FALLBACK14-NEXT: vmovups (%rdi), %ymm0
; FALLBACK14-NEXT: vmovups 32(%rdi), %xmm1
; FALLBACK14-NEXT: movq 48(%rdi), %rcx
@@ -21317,62 +21280,60 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; FALLBACK14-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; FALLBACK14-NEXT: leal (,%rax,8), %esi
-; FALLBACK14-NEXT: andl $56, %esi
+; FALLBACK14-NEXT: leal (,%rax,8), %ecx
+; FALLBACK14-NEXT: andl $56, %ecx
+; FALLBACK14-NEXT: movl %ecx, %esi
; FALLBACK14-NEXT: andl $56, %eax
-; FALLBACK14-NEXT: shrxq %rsi, -128(%rsp,%rax), %r11
-; FALLBACK14-NEXT: movq -112(%rsp,%rax), %rcx
-; FALLBACK14-NEXT: movq -104(%rsp,%rax), %rdi
-; FALLBACK14-NEXT: shrxq %rsi, %rdi, %r12
-; FALLBACK14-NEXT: movq -96(%rsp,%rax), %r13
-; FALLBACK14-NEXT: shrxq %rsi, %rcx, %r9
-; FALLBACK14-NEXT: movq -88(%rsp,%rax), %r10
-; FALLBACK14-NEXT: shrxq %rsi, %r10, %r14
-; FALLBACK14-NEXT: shrxq %rsi, %r13, %r15
-; FALLBACK14-NEXT: movl %esi, %ebx
-; FALLBACK14-NEXT: notb %bl
-; FALLBACK14-NEXT: movq -120(%rsp,%rax), %rbp
-; FALLBACK14-NEXT: leaq (%rbp,%rbp), %r8
-; FALLBACK14-NEXT: shlxq %rbx, %r8, %r8
-; FALLBACK14-NEXT: orq %r11, %r8
-; FALLBACK14-NEXT: leaq (%r13,%r13), %r11
-; FALLBACK14-NEXT: shlxq %rbx, %r11, %r11
-; FALLBACK14-NEXT: orq %r12, %r11
+; FALLBACK14-NEXT: shrxq %rsi, -128(%rsp,%rax), %r8
+; FALLBACK14-NEXT: notb %cl
+; FALLBACK14-NEXT: movq -120(%rsp,%rax), %r10
+; FALLBACK14-NEXT: movq -112(%rsp,%rax), %r9
+; FALLBACK14-NEXT: leaq (%r10,%r10), %rdi
+; FALLBACK14-NEXT: shlxq %rcx, %rdi, %rdi
+; FALLBACK14-NEXT: orq %r8, %rdi
+; FALLBACK14-NEXT: movq -104(%rsp,%rax), %r11
+; FALLBACK14-NEXT: shrxq %rsi, %r11, %rbx
+; FALLBACK14-NEXT: movq -96(%rsp,%rax), %r14
+; FALLBACK14-NEXT: leaq (%r14,%r14), %r8
+; FALLBACK14-NEXT: shlxq %rcx, %r8, %r8
+; FALLBACK14-NEXT: orq %rbx, %r8
+; FALLBACK14-NEXT: shrxq %rsi, %r9, %rbx
+; FALLBACK14-NEXT: addq %r11, %r11
+; FALLBACK14-NEXT: shlxq %rcx, %r11, %r11
+; FALLBACK14-NEXT: orq %rbx, %r11
+; FALLBACK14-NEXT: movq -88(%rsp,%rax), %rbx
+; FALLBACK14-NEXT: shrxq %rsi, %rbx, %r15
; FALLBACK14-NEXT: movq -80(%rsp,%rax), %r12
-; FALLBACK14-NEXT: shrxq %rsi, %r12, %r13
-; FALLBACK14-NEXT: shrxq %rsi, %rbp, %rbp
+; FALLBACK14-NEXT: leaq (%r12,%r12), %r13
+; FALLBACK14-NEXT: shlxq %rcx, %r13, %r13
+; FALLBACK14-NEXT: orq %r15, %r13
+; FALLBACK14-NEXT: shrxq %rsi, %r14, %r14
+; FALLBACK14-NEXT: addq %rbx, %rbx
+; FALLBACK14-NEXT: shlxq %rcx, %rbx, %rbx
+; FALLBACK14-NEXT: orq %r14, %rbx
+; FALLBACK14-NEXT: shrxq %rsi, %r12, %r14
; FALLBACK14-NEXT: movq -72(%rsp,%rax), %rax
-; FALLBACK14-NEXT: sarxq %rsi, %rax, %rsi
-; FALLBACK14-NEXT: addq %rdi, %rdi
-; FALLBACK14-NEXT: shlxq %rbx, %rdi, %rdi
-; FALLBACK14-NEXT: orq %r9, %rdi
-; FALLBACK14-NEXT: leaq (%r12,%r12), %r9
-; FALLBACK14-NEXT: shlxq %rbx, %r9, %r9
-; FALLBACK14-NEXT: orq %r14, %r9
-; FALLBACK14-NEXT: addq %r10, %r10
-; FALLBACK14-NEXT: shlxq %rbx, %r10, %r10
-; FALLBACK14-NEXT: orq %r15, %r10
-; FALLBACK14-NEXT: addq %rax, %rax
-; FALLBACK14-NEXT: shlxq %rbx, %rax, %rax
-; FALLBACK14-NEXT: orq %r13, %rax
-; FALLBACK14-NEXT: addq %rcx, %rcx
-; FALLBACK14-NEXT: shlxq %rbx, %rcx, %rcx
-; FALLBACK14-NEXT: orq %rbp, %rcx
-; FALLBACK14-NEXT: movq %rsi, 56(%rdx)
+; FALLBACK14-NEXT: leaq (%rax,%rax), %r15
+; FALLBACK14-NEXT: shlxq %rcx, %r15, %r15
+; FALLBACK14-NEXT: orq %r14, %r15
+; FALLBACK14-NEXT: shrxq %rsi, %r10, %r10
+; FALLBACK14-NEXT: addq %r9, %r9
+; FALLBACK14-NEXT: shlxq %rcx, %r9, %rcx
+; FALLBACK14-NEXT: orq %r10, %rcx
+; FALLBACK14-NEXT: sarxq %rsi, %rax, %rax
+; FALLBACK14-NEXT: movq %rax, 56(%rdx)
; FALLBACK14-NEXT: movq %rcx, 8(%rdx)
-; FALLBACK14-NEXT: movq %rax, 48(%rdx)
-; FALLBACK14-NEXT: movq %r10, 32(%rdx)
-; FALLBACK14-NEXT: movq %r9, 40(%rdx)
-; FALLBACK14-NEXT: movq %rdi, 16(%rdx)
-; FALLBACK14-NEXT: movq %r11, 24(%rdx)
-; FALLBACK14-NEXT: movq %r8, (%rdx)
-; FALLBACK14-NEXT: addq $8, %rsp
+; FALLBACK14-NEXT: movq %r15, 48(%rdx)
+; FALLBACK14-NEXT: movq %rbx, 32(%rdx)
+; FALLBACK14-NEXT: movq %r13, 40(%rdx)
+; FALLBACK14-NEXT: movq %r11, 16(%rdx)
+; FALLBACK14-NEXT: movq %r8, 24(%rdx)
+; FALLBACK14-NEXT: movq %rdi, (%rdx)
; FALLBACK14-NEXT: popq %rbx
; FALLBACK14-NEXT: popq %r12
; FALLBACK14-NEXT: popq %r13
; FALLBACK14-NEXT: popq %r14
; FALLBACK14-NEXT: popq %r15
-; FALLBACK14-NEXT: popq %rbp
; FALLBACK14-NEXT: vzeroupper
; FALLBACK14-NEXT: retq
;
@@ -21960,111 +21921,112 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK18-NEXT: movl %eax, %ecx
; FALLBACK18-NEXT: leal (,%eax,8), %edx
; FALLBACK18-NEXT: andl $24, %edx
+; FALLBACK18-NEXT: movl %edx, %ebx
; FALLBACK18-NEXT: andl $60, %ecx
; FALLBACK18-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK18-NEXT: movl 72(%esp,%ecx), %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: movl %edx, %ebx
-; FALLBACK18-NEXT: notb %bl
+; FALLBACK18-NEXT: notb %dl
; FALLBACK18-NEXT: leal (%edi,%edi), %ebp
-; FALLBACK18-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK18-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK18-NEXT: leal (%esi,%esi), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
; FALLBACK18-NEXT: orl %eax, %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: orl %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK18-NEXT: leal (%esi,%esi), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: shlxl %edx, %edi, %edi
; FALLBACK18-NEXT: orl %eax, %edi
; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK18-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK18-NEXT: orl %ebp, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK18-NEXT: addl %edi, %edi
-; FALLBACK18-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %edi, %eax
; FALLBACK18-NEXT: orl %esi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl %ecx, %ebp
+; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK18-NEXT: leal (%eax,%eax), %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
; FALLBACK18-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK18-NEXT: movl %ecx, %edi
-; FALLBACK18-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK18-NEXT: orl %ebp, %eax
+; FALLBACK18-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK18-NEXT: orl %edi, %eax
; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %esi, %esi
-; FALLBACK18-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK18-NEXT: orl %ecx, %esi
-; FALLBACK18-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK18-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK18-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK18-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK18-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK18-NEXT: orl %edi, %ecx
-; FALLBACK18-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK18-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK18-NEXT: orl %ecx, %eax
+; FALLBACK18-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK18-NEXT: movl 120(%esp,%ebp), %edi
+; FALLBACK18-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK18-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK18-NEXT: movl 116(%esp,%ebp), %eax
+; FALLBACK18-NEXT: shrxl %ebx, %eax, %ebp
+; FALLBACK18-NEXT: orl %ebp, %esi
+; FALLBACK18-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK18-NEXT: addl %eax, %eax
-; FALLBACK18-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK18-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK18-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK18-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK18-NEXT: sarxl %edx, %ebp, %edx
-; FALLBACK18-NEXT: addl %ebp, %ebp
-; FALLBACK18-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK18-NEXT: orl %eax, %ebx
+; FALLBACK18-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK18-NEXT: orl %ebp, %ecx
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK18-NEXT: movl 124(%esp,%eax), %eax
+; FALLBACK18-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK18-NEXT: shlxl %edx, %ebp, %edx
+; FALLBACK18-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK18-NEXT: orl %edi, %edx
+; FALLBACK18-NEXT: sarxl %ebx, %eax, %edi
; FALLBACK18-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK18-NEXT: movl %edx, 60(%eax)
-; FALLBACK18-NEXT: movl %ebx, 56(%eax)
-; FALLBACK18-NEXT: movl %edi, 48(%eax)
-; FALLBACK18-NEXT: movl %ecx, 52(%eax)
-; FALLBACK18-NEXT: movl %esi, 40(%eax)
+; FALLBACK18-NEXT: movl %edi, 60(%eax)
+; FALLBACK18-NEXT: movl %edx, 56(%eax)
+; FALLBACK18-NEXT: movl %ecx, 48(%eax)
+; FALLBACK18-NEXT: movl %esi, 52(%eax)
+; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK18-NEXT: movl %ecx, 40(%eax)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK18-NEXT: movl %ecx, 44(%eax)
; FALLBACK18-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -22664,111 +22626,112 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK22-NEXT: movl %eax, %ecx
; FALLBACK22-NEXT: leal (,%eax,8), %edx
; FALLBACK22-NEXT: andl $24, %edx
+; FALLBACK22-NEXT: movl %edx, %ebx
; FALLBACK22-NEXT: andl $60, %ecx
; FALLBACK22-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK22-NEXT: movl 72(%esp,%ecx), %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: movl %edx, %ebx
-; FALLBACK22-NEXT: notb %bl
+; FALLBACK22-NEXT: notb %dl
; FALLBACK22-NEXT: leal (%edi,%edi), %ebp
-; FALLBACK22-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK22-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK22-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
; FALLBACK22-NEXT: orl %edi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK22-NEXT: leal (%esi,%esi), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
; FALLBACK22-NEXT: orl %eax, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: orl %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK22-NEXT: leal (%esi,%esi), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: shlxl %edx, %edi, %edi
; FALLBACK22-NEXT: orl %eax, %edi
; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK22-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK22-NEXT: orl %ebp, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK22-NEXT: addl %edi, %edi
-; FALLBACK22-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %edi, %eax
; FALLBACK22-NEXT: orl %esi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl %ecx, %ebp
+; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK22-NEXT: leal (%eax,%eax), %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
; FALLBACK22-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK22-NEXT: movl %ecx, %edi
-; FALLBACK22-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK22-NEXT: orl %ebp, %eax
+; FALLBACK22-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK22-NEXT: orl %edi, %eax
; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK22-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %esi, %esi
-; FALLBACK22-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK22-NEXT: orl %ecx, %esi
-; FALLBACK22-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK22-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK22-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK22-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK22-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK22-NEXT: orl %edi, %ecx
-; FALLBACK22-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK22-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK22-NEXT: orl %ecx, %eax
+; FALLBACK22-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK22-NEXT: movl 120(%esp,%ebp), %edi
+; FALLBACK22-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK22-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK22-NEXT: movl 116(%esp,%ebp), %eax
+; FALLBACK22-NEXT: shrxl %ebx, %eax, %ebp
+; FALLBACK22-NEXT: orl %ebp, %esi
+; FALLBACK22-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK22-NEXT: addl %eax, %eax
-; FALLBACK22-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK22-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK22-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK22-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK22-NEXT: sarxl %edx, %ebp, %edx
-; FALLBACK22-NEXT: addl %ebp, %ebp
-; FALLBACK22-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK22-NEXT: orl %eax, %ebx
+; FALLBACK22-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK22-NEXT: orl %ebp, %ecx
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK22-NEXT: movl 124(%esp,%eax), %eax
+; FALLBACK22-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK22-NEXT: shlxl %edx, %ebp, %edx
+; FALLBACK22-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK22-NEXT: orl %edi, %edx
+; FALLBACK22-NEXT: sarxl %ebx, %eax, %edi
; FALLBACK22-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK22-NEXT: movl %edx, 60(%eax)
-; FALLBACK22-NEXT: movl %ebx, 56(%eax)
-; FALLBACK22-NEXT: movl %edi, 48(%eax)
-; FALLBACK22-NEXT: movl %ecx, 52(%eax)
-; FALLBACK22-NEXT: movl %esi, 40(%eax)
+; FALLBACK22-NEXT: movl %edi, 60(%eax)
+; FALLBACK22-NEXT: movl %edx, 56(%eax)
+; FALLBACK22-NEXT: movl %ecx, 48(%eax)
+; FALLBACK22-NEXT: movl %esi, 52(%eax)
+; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK22-NEXT: movl %ecx, 40(%eax)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK22-NEXT: movl %ecx, 44(%eax)
; FALLBACK22-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -23326,111 +23289,112 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK26-NEXT: movl %eax, %ecx
; FALLBACK26-NEXT: leal (,%eax,8), %edx
; FALLBACK26-NEXT: andl $24, %edx
+; FALLBACK26-NEXT: movl %edx, %ebx
; FALLBACK26-NEXT: andl $60, %ecx
; FALLBACK26-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK26-NEXT: movl 72(%esp,%ecx), %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: movl %edx, %ebx
-; FALLBACK26-NEXT: notb %bl
+; FALLBACK26-NEXT: notb %dl
; FALLBACK26-NEXT: leal (%edi,%edi), %ebp
-; FALLBACK26-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK26-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK26-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %esi, %eax
; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK26-NEXT: leal (%esi,%esi), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
; FALLBACK26-NEXT: orl %eax, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: orl %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK26-NEXT: leal (%esi,%esi), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: shlxl %edx, %edi, %edi
; FALLBACK26-NEXT: orl %eax, %edi
; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK26-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK26-NEXT: orl %ebp, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK26-NEXT: addl %edi, %edi
-; FALLBACK26-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %edi, %eax
; FALLBACK26-NEXT: orl %esi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl %ecx, %ebp
+; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK26-NEXT: leal (%eax,%eax), %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK26-NEXT: shlxl %edx, %esi, %eax
; FALLBACK26-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK26-NEXT: movl %ecx, %edi
-; FALLBACK26-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK26-NEXT: orl %ebp, %eax
+; FALLBACK26-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK26-NEXT: orl %edi, %eax
; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %esi, %esi
-; FALLBACK26-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK26-NEXT: orl %ecx, %esi
-; FALLBACK26-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK26-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK26-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK26-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK26-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK26-NEXT: orl %edi, %ecx
-; FALLBACK26-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK26-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK26-NEXT: orl %ecx, %eax
+; FALLBACK26-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK26-NEXT: movl 120(%esp,%ebp), %edi
+; FALLBACK26-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK26-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK26-NEXT: movl 116(%esp,%ebp), %eax
+; FALLBACK26-NEXT: shrxl %ebx, %eax, %ebp
+; FALLBACK26-NEXT: orl %ebp, %esi
+; FALLBACK26-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK26-NEXT: addl %eax, %eax
-; FALLBACK26-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK26-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK26-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK26-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK26-NEXT: sarxl %edx, %ebp, %edx
-; FALLBACK26-NEXT: addl %ebp, %ebp
-; FALLBACK26-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK26-NEXT: orl %eax, %ebx
+; FALLBACK26-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK26-NEXT: orl %ebp, %ecx
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK26-NEXT: movl 124(%esp,%eax), %eax
+; FALLBACK26-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK26-NEXT: shlxl %edx, %ebp, %edx
+; FALLBACK26-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK26-NEXT: orl %edi, %edx
+; FALLBACK26-NEXT: sarxl %ebx, %eax, %edi
; FALLBACK26-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK26-NEXT: movl %edx, 60(%eax)
-; FALLBACK26-NEXT: movl %ebx, 56(%eax)
-; FALLBACK26-NEXT: movl %edi, 48(%eax)
-; FALLBACK26-NEXT: movl %ecx, 52(%eax)
-; FALLBACK26-NEXT: movl %esi, 40(%eax)
+; FALLBACK26-NEXT: movl %edi, 60(%eax)
+; FALLBACK26-NEXT: movl %edx, 56(%eax)
+; FALLBACK26-NEXT: movl %ecx, 48(%eax)
+; FALLBACK26-NEXT: movl %esi, 52(%eax)
+; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK26-NEXT: movl %ecx, 40(%eax)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK26-NEXT: movl %ecx, 44(%eax)
; FALLBACK26-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -23988,111 +23952,112 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %byteOff.ptr, ptr %dst) nounwind {
; FALLBACK30-NEXT: movl %eax, %ecx
; FALLBACK30-NEXT: leal (,%eax,8), %edx
; FALLBACK30-NEXT: andl $24, %edx
+; FALLBACK30-NEXT: movl %edx, %ebx
; FALLBACK30-NEXT: andl $60, %ecx
; FALLBACK30-NEXT: movl 68(%esp,%ecx), %esi
; FALLBACK30-NEXT: movl 72(%esp,%ecx), %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %esi, %eax
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: movl %edx, %ebx
-; FALLBACK30-NEXT: notb %bl
+; FALLBACK30-NEXT: notb %dl
; FALLBACK30-NEXT: leal (%edi,%edi), %ebp
-; FALLBACK30-NEXT: shlxl %ebx, %ebp, %eax
+; FALLBACK30-NEXT: shlxl %edx, %ebp, %eax
; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; FALLBACK30-NEXT: shrxl %ebx, 64(%esp,%ecx), %edi
; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %esi, %eax
; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 80(%esp,%ecx), %esi
; FALLBACK30-NEXT: leal (%esi,%esi), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: movl 76(%esp,%ecx), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
; FALLBACK30-NEXT: orl %eax, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 88(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: movl 84(%esp,%ecx), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: orl %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 96(%esp,%ecx), %esi
; FALLBACK30-NEXT: leal (%esi,%esi), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: movl 92(%esp,%ecx), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: shlxl %edx, %edi, %edi
; FALLBACK30-NEXT: orl %eax, %edi
; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 104(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: movl 100(%esp,%ecx), %edi
-; FALLBACK30-NEXT: shrxl %edx, %edi, %ebp
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %ebp
; FALLBACK30-NEXT: orl %ebp, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %esi, %esi
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %esi
; FALLBACK30-NEXT: addl %edi, %edi
-; FALLBACK30-NEXT: shlxl %ebx, %edi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %edi, %eax
; FALLBACK30-NEXT: orl %esi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl %ecx, %ebp
+; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: movl 112(%esp,%ecx), %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; FALLBACK30-NEXT: leal (%eax,%eax), %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %eax
+; FALLBACK30-NEXT: shlxl %edx, %esi, %eax
; FALLBACK30-NEXT: movl 108(%esp,%ecx), %esi
-; FALLBACK30-NEXT: movl %ecx, %edi
-; FALLBACK30-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, %esi, %ebp
-; FALLBACK30-NEXT: orl %ebp, %eax
+; FALLBACK30-NEXT: shrxl %ebx, %esi, %edi
+; FALLBACK30-NEXT: orl %edi, %eax
; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %esi, %esi
-; FALLBACK30-NEXT: shlxl %ebx, %esi, %esi
-; FALLBACK30-NEXT: orl %ecx, %esi
-; FALLBACK30-NEXT: movl 120(%esp,%edi), %ebp
-; FALLBACK30-NEXT: leal (%ebp,%ebp), %ecx
-; FALLBACK30-NEXT: shlxl %ebx, %ecx, %ecx
-; FALLBACK30-NEXT: movl 116(%esp,%edi), %eax
-; FALLBACK30-NEXT: shrxl %edx, %eax, %edi
-; FALLBACK30-NEXT: orl %edi, %ecx
-; FALLBACK30-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK30-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: shlxl %edx, %esi, %eax
+; FALLBACK30-NEXT: orl %ecx, %eax
+; FALLBACK30-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; FALLBACK30-NEXT: movl 120(%esp,%ebp), %edi
+; FALLBACK30-NEXT: leal (%edi,%edi), %ecx
+; FALLBACK30-NEXT: shlxl %edx, %ecx, %esi
+; FALLBACK30-NEXT: movl 116(%esp,%ebp), %eax
+; FALLBACK30-NEXT: shrxl %ebx, %eax, %ebp
+; FALLBACK30-NEXT: orl %ebp, %esi
+; FALLBACK30-NEXT: shrxl %ebx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; FALLBACK30-NEXT: addl %eax, %eax
-; FALLBACK30-NEXT: shlxl %ebx, %eax, %edi
-; FALLBACK30-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; FALLBACK30-NEXT: shrxl %edx, %ebp, %eax
-; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; FALLBACK30-NEXT: movl 124(%esp,%ebp), %ebp
-; FALLBACK30-NEXT: sarxl %edx, %ebp, %edx
-; FALLBACK30-NEXT: addl %ebp, %ebp
-; FALLBACK30-NEXT: shlxl %ebx, %ebp, %ebx
-; FALLBACK30-NEXT: orl %eax, %ebx
+; FALLBACK30-NEXT: shlxl %edx, %eax, %ecx
+; FALLBACK30-NEXT: orl %ebp, %ecx
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; FALLBACK30-NEXT: movl 124(%esp,%eax), %eax
+; FALLBACK30-NEXT: leal (%eax,%eax), %ebp
+; FALLBACK30-NEXT: shlxl %edx, %ebp, %edx
+; FALLBACK30-NEXT: shrxl %ebx, %edi, %edi
+; FALLBACK30-NEXT: orl %edi, %edx
+; FALLBACK30-NEXT: sarxl %ebx, %eax, %edi
; FALLBACK30-NEXT: movl {{[0-9]+}}(%esp), %eax
-; FALLBACK30-NEXT: movl %edx, 60(%eax)
-; FALLBACK30-NEXT: movl %ebx, 56(%eax)
-; FALLBACK30-NEXT: movl %edi, 48(%eax)
-; FALLBACK30-NEXT: movl %ecx, 52(%eax)
-; FALLBACK30-NEXT: movl %esi, 40(%eax)
+; FALLBACK30-NEXT: movl %edi, 60(%eax)
+; FALLBACK30-NEXT: movl %edx, 56(%eax)
+; FALLBACK30-NEXT: movl %ecx, 48(%eax)
+; FALLBACK30-NEXT: movl %esi, 52(%eax)
+; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; FALLBACK30-NEXT: movl %ecx, 40(%eax)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; FALLBACK30-NEXT: movl %ecx, 44(%eax)
; FALLBACK30-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
diff --git a/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll b/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
index 338e104..221a51e 100644
--- a/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
+++ b/llvm/test/CodeGen/X86/wide-scalar-shift-legalization.ll
@@ -712,33 +712,33 @@ define void @lshr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%esi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, (%esp,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edi), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%esi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%edi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 4(%edx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -994,42 +994,42 @@ define void @shl_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%ecx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ecx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %al, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%esp,%edx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%edx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%esp,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%esi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, 28(%esp,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%edx), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, 28(%esp,%esi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%esi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 12(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 4(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -1297,33 +1297,33 @@ define void @ashr_16bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%esi), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, (%esp,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $12, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%edi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, (%esp,%edi), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%esi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%esp,%edi), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %ecx, %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, (%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 4(%edx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $44, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -1487,31 +1487,31 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rsi,8), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %sil, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -72(%rsp,%rsi,8), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rsi,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rsi, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: lshr_32bytes:
@@ -1761,88 +1761,90 @@ define void @lshr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 32(%esp,%esi,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %eax, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 28(%edi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%edi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 16(%edi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 20(%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%edi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -2040,32 +2042,32 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: negb %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movsbq %cl, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: negb %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movsbq %sil, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rdi), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rdi), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, -16(%rsp,%rdi), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rdi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r8, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rdi), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rsi, %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rsi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, -16(%rsp,%rdi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rdi), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rdi, %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
@@ -2319,97 +2321,101 @@ define void @shl_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl (%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $28, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %cl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $3, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $28, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: negb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movsbl %dl, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 64(%esp,%esi), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%esi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%esi), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%esi), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%esi), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, %edi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebp), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebp), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ebx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %edx, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, 92(%esp,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%esi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, 92(%esp,%edx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 28(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 24(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 28(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 20(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $108, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -2610,31 +2616,31 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rcx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rsi,8), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rax, %rsi, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $al killed $al killed $rax def $rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %sil
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %sil, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi,8), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi,8), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: andb $63, %al
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -72(%rsp,%rsi,8), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi,8), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rsi,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rcx, %rsi, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: ashr_32bytes:
@@ -2927,60 +2933,59 @@ define void @ashr_32bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%esi,4), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 32(%esp,%esi,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ecx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebx,%ebx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %ebx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %eax, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 32(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%esi,4), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%esi,4), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %eax, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%esi,4), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %ecx, %esi, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 24(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%esi)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 24(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 16(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 20(%esi)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%esi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%esi)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
@@ -3263,13 +3268,11 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: lshr_64bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
@@ -3292,65 +3295,63 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rax), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %rbp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %r10d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r10d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r11,%r11), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rbx, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rax, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r12
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r12,%r12), %r13
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %r13, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r11, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r11, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 56(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 48(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r12, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r15, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 56(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 48(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: lshr_64bytes:
@@ -3868,20 +3869,20 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%eax), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%eax), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -3906,116 +3907,117 @@ define void @lshr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ecx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, 64(%esp,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, 64(%esp,%ebx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ecx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ecx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ecx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ebx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ebx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ecx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ecx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ecx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 60(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 56(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 48(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 52(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 56(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 48(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
@@ -4388,10 +4390,8 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: shl_64bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
@@ -4419,63 +4419,61 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %eax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %esi
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
; X64-HAVE-BMI2-NO-SHLD-NEXT: negl %esi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movslq %esi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi), %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rcx, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rsi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi), %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r14, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rsi), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r8, %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r10, %r12
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %r13d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %r13b
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r10, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rsi), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %rbp
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r14, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, -8(%rsp,%rsi), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%rsi), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rcx, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rsi), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rdi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %al
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r9, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -40(%rsp,%rsi), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r11, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -48(%rsp,%rsi), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -24(%rsp,%rsi), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rbx, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -32(%rsp,%rsi), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r15, %r12
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r15, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r11, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r12, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, -8(%rsp,%rsi), %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -16(%rsp,%rsi), %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rsi, %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %rsi, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %r13, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r12, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 48(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rsi, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrq %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %rbx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 48(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 56(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 32(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r14, 24(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r15, 40(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: shl_64bytes:
@@ -4972,33 +4970,33 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $204, %esp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%ebp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%ebp), %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%ebp), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%ebp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%ebp), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%ebp), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%ebp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%eax), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%eax), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%ebp), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm0, %xmm0
@@ -5011,7 +5009,7 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -5032,149 +5030,152 @@ define void @shl_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal {{[0-9]+}}(%esp), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edi), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%edx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%edx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edi), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 12(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, (%esp) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 16(%edx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edi), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%edx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, (%esp), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%edi), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%edi), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%edx), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%edx), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ecx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%edi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%edi), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%edx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%edx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: negl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, 188(%esp,%ecx), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%edi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%edx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%edx), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%edx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 56(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 60(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 48(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 44(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 32(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 36(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 24(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 28(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 16(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 20(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: negl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebp, 188(%esp,%ebx), %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 56(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 60(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 48(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 52(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 40(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 44(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 32(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 36(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 24(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 16(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 20(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%edx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%edx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $204, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -5534,13 +5535,11 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: ashr_64bytes:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r15
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq (%rdi), %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 8(%rdi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq 16(%rdi), %r9
@@ -5567,65 +5566,63 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, -{{[0-9]+}}(%rsp)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %esi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rax), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rdi, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r12d
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r15, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %rbp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rax), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %r10d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %r10d
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r11,%r11), %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %rdi, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %rdi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rax), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorb $63, %sil
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbx, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %rbx, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r13
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rcx, %rax, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r12, %r10, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r9, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rax), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r15,%r15), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rbx, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -88(%rsp,%rax), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r11, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -80(%rsp,%rax), %r12
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r12,%r12), %r13
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %r10, %r13, %r10
; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r15, %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r11, %r11
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r11, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rbp, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r13, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 56(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 48(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r12, %r14
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -72(%rsp,%rax), %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rax,%rax), %r15
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r15, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: sarxq %rcx, %rax, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 56(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 48(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, 32(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r10, 40(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 24(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rbx, 16(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r12
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r13
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r15
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbp
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: ashr_64bytes:
@@ -6221,33 +6218,31 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[0-9]+}}(%esp)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $31, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrl $3, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 68(%esp,%ebx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 72(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %cl
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, 64(%esp,%ebx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 80(%esp,%ebx), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 76(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
@@ -6256,87 +6251,84 @@ define void @ashr_64bytes(ptr %src.ptr, ptr %bitOff.ptr, ptr %dst) nounwind {
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 88(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 84(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 96(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 92(%esp,%ebx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 104(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 100(%esp,%ebx), %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 112(%esp,%ebx), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edi, %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 108(%esp,%ebx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ebx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %ebp, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ebx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %ebp, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 120(%esp,%ebx), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %esi, %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 116(%esp,%ebx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %eax, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 124(%esp,%ebx), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%eax,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: sarxl %edx, %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 60(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebx, 56(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 48(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 52(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 56(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 48(%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 52(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 40(%eax)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
index c3054a3..6b5c604 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca-with-zero-upper-half.ll
@@ -1635,22 +1635,22 @@ define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i
; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %al
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rdi, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rcx,8), %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rax,8), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rcx,8), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rcx,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca_with_zero_upper_half:
@@ -1807,40 +1807,43 @@ define void @load_16byte_chunk_of_32byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 16(%esp,%ecx,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%esi,4), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%esi,4), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%ecx,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $92, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
@@ -1906,13 +1909,13 @@ define void @load_1byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-BMI2-NEXT: leal (,%rsi,8), %eax
; X64-BMI2-NEXT: andl $56, %eax
-; X64-BMI2-NEXT: andl $56, %esi
-; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
-; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: movl %eax, %ecx
; X64-BMI2-NEXT: notl %eax
-; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
-; X64-BMI2-NEXT: addl %esi, %esi
-; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %edi
+; X64-BMI2-NEXT: addl %edi, %edi
+; X64-BMI2-NEXT: shlxq %rax, %rdi, %rax
+; X64-BMI2-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rcx
; X64-BMI2-NEXT: orl %eax, %ecx
; X64-BMI2-NEXT: movb %cl, (%rdx)
; X64-BMI2-NEXT: popq %rax
@@ -2070,13 +2073,13 @@ define void @load_2byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-BMI2-NEXT: leal (,%rsi,8), %eax
; X64-BMI2-NEXT: andl $56, %eax
-; X64-BMI2-NEXT: andl $56, %esi
-; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
-; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: movl %eax, %ecx
; X64-BMI2-NEXT: notl %eax
-; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
-; X64-BMI2-NEXT: addl %esi, %esi
-; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %edi
+; X64-BMI2-NEXT: addl %edi, %edi
+; X64-BMI2-NEXT: shlxq %rax, %rdi, %rax
+; X64-BMI2-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rcx
; X64-BMI2-NEXT: orl %eax, %ecx
; X64-BMI2-NEXT: movw %cx, (%rdx)
; X64-BMI2-NEXT: popq %rax
@@ -2233,13 +2236,13 @@ define void @load_4byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
; X64-BMI2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-BMI2-NEXT: leal (,%rsi,8), %eax
; X64-BMI2-NEXT: andl $56, %eax
-; X64-BMI2-NEXT: andl $56, %esi
-; X64-BMI2-NEXT: shrxq %rax, -128(%rsp,%rsi), %rcx
-; X64-BMI2-NEXT: # kill: def $eax killed $eax killed $rax def $rax
+; X64-BMI2-NEXT: movl %eax, %ecx
; X64-BMI2-NEXT: notl %eax
-; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %esi
-; X64-BMI2-NEXT: addl %esi, %esi
-; X64-BMI2-NEXT: shlxq %rax, %rsi, %rax
+; X64-BMI2-NEXT: andl $56, %esi
+; X64-BMI2-NEXT: movl -120(%rsp,%rsi), %edi
+; X64-BMI2-NEXT: addl %edi, %edi
+; X64-BMI2-NEXT: shlxq %rax, %rdi, %rax
+; X64-BMI2-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rcx
; X64-BMI2-NEXT: orl %eax, %ecx
; X64-BMI2-NEXT: movl %ecx, (%rdx)
; X64-BMI2-NEXT: popq %rax
@@ -2521,10 +2524,11 @@ define void @load_8byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
;
; X86-HAVE-BMI2-NO-SHLD-LABEL: load_8byte_chunk_of_64byte_alloca_with_zero_upper_half:
; X86-HAVE-BMI2-NO-SHLD: # %bb.0:
+; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $128, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $140, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -2541,25 +2545,26 @@ define void @load_8byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i6
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, (%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%ecx,8), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, (%esp,%ecx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ecx), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %edx, %edi, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $dl killed $dl killed $edx def $edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, (%esp,%ecx), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 4(%esp,%ecx), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 8(%esp,%ecx), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $128, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $140, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: retl
%init = load <32 x i8>, ptr %src, align 1
%intermediate.sroa.0.0.vec.expand = shufflevector <32 x i8> %init, <32 x i8> poison, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -2667,21 +2672,21 @@ define void @load_16byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X64-HAVE-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx def $rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, -128(%rsp,%rsi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r9
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %rsi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r10, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rcx
; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, %r9, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rsi, 8(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
@@ -2860,33 +2865,33 @@ define void @load_16byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%eax,8), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%eax), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, 16(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%eax), %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edx, %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %ebp, %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 8(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 4(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, (%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $156, %esp
@@ -3026,9 +3031,7 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
;
; X64-HAVE-BMI2-NO-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
; X64-HAVE-BMI2-NO-SHLD: # %bb.0:
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: pushq %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movups (%rdi), %xmm0
; X64-HAVE-BMI2-NO-SHLD-NEXT: movups 16(%rdi), %xmm1
; X64-HAVE-BMI2-NO-SHLD-NEXT: xorps %xmm2, %xmm2
@@ -3043,38 +3046,36 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X64-HAVE-BMI2-NO-SHLD-NEXT: leal (,%rsi,8), %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %edi
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $56, %esi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, -128(%rsp,%rsi), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r8, %r10
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %r9, %r11
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rsi), %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rcx, %rbx, %r14
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $cl killed $cl killed $rcx def $rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, -128(%rsp,%rsi), %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r8, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -120(%rsp,%rsi), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -112(%rsp,%rsi), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r11, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %r11
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, %r9, %r8
; X64-HAVE-BMI2-NO-SHLD-NEXT: notl %eax
; X64-HAVE-BMI2-NO-SHLD-NEXT: andl $63, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r9,%r9), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r10, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%rbx,%rbx), %r9
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %r9, %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r11, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r10,%r10), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, %r10, %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -104(%rsp,%rsi), %r10
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r10,%r10), %rbx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rcx, %rbx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rdi, %r10, %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -96(%rsp,%rsi), %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rsi, %rsi
; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rax, %rsi, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r14, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %rax
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 24(%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 16(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r8, (%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq $8, %rsp
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r11, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %rbx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: popq %r14
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_32byte_chunk_of_64byte_alloca_with_zero_upper_half:
@@ -3304,7 +3305,7 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %ebx
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: pushl %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $156, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: subl $172, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movups (%ecx), %xmm0
@@ -3320,59 +3321,60 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (,%eax,8), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $24, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%eax), %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%eax), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andl $60, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, 32(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %cl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%eax), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ebp,%ebp), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%eax), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %edi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%eax), %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%eax), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, (%esp) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 36(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %esi, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 52(%esp,%eax), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 40(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edx, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 56(%esp,%eax), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %esi, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %esi, %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebp, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %edx, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 44(%esp,%eax), %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, %ebp, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ebp, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 48(%esp,%eax), %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 60(%esp,%eax), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edx,%edx), %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %ebp, %ebp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 64(%esp,%eax), %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %eax, %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ecx, %eax, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %eax
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 28(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, 24(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, 20(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 16(%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl (%esp), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 8(%ecx)
@@ -3380,7 +3382,7 @@ define void @load_32byte_chunk_of_64byte_alloca_with_zero_upper_half(ptr %src, i
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 4(%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $156, %esp
+; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $172, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %ebx
diff --git a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
index 7735500..bed8e58 100644
--- a/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
+++ b/llvm/test/CodeGen/X86/widen-load-of-small-alloca.ll
@@ -1879,22 +1879,22 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %al
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %al, %eax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, -72(%rsp,%rax,8), %rcx
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rax,8), %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rsi, %rdi, %r8
-; X64-HAVE-BMI2-NO-SHLD-NEXT: # kill: def $sil killed $sil killed $rsi def $rsi
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrb $6, %cl
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, -72(%rsp,%rcx,8), %rdi
; X64-HAVE-BMI2-NO-SHLD-NEXT: notb %sil
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rax,8), %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rdi, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rcx, %rdi
-; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rax, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %r8, %rax
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rax, 8(%rdx)
-; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rdi, (%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -64(%rsp,%rcx,8), %r8
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq -56(%rsp,%rcx,8), %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: leaq (%r8,%r8), %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %r9, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rdi, %r9
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shrxq %rax, %r8, %rax
+; X64-HAVE-BMI2-NO-SHLD-NEXT: addq %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: shlxq %rsi, %rcx, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: orq %rax, %rcx
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %rcx, 8(%rdx)
+; X64-HAVE-BMI2-NO-SHLD-NEXT: movq %r9, (%rdx)
; X64-HAVE-BMI2-NO-SHLD-NEXT: retq
;
; X64-HAVE-BMI2-HAVE-SHLD-LABEL: load_16byte_chunk_of_32byte_alloca:
@@ -2055,40 +2055,43 @@ define void @load_16byte_chunk_of_32byte_alloca(ptr %src, i64 %byteOff, ptr %dst
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %cl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %cl, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, 16(%esp,%ecx,4), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrb $5, %dl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movzbl %dl, %esi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ecx, 16(%esp,%esi,4), %edx
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, %ebp
; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %ebx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %dl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edi, %ebp
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %bl
-; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%esi,%esi), %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edi, %edi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%ecx,4), %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: notb %bl
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 20(%esp,%esi,4), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%edi,%edi), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 24(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %edi, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: andb $24, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: xorb $31, %al
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %edi, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %edx, %edi
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 28(%esp,%esi,4), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: leal (%ecx,%ecx), %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %edx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ebx, %edx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shrxl %ebp, %ecx, %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%esi,4), %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %esi, %esi
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %edx, %esi, %edx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl 32(%esp,%ecx,4), %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: addl %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %ebx, %ecx, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %eax, %ecx
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ecx, 12(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%eax)
-; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %ebp, (%eax)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: shlxl %eax, %esi, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: orl %ecx, %eax
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, 12(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edx, 8(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %edi, 4(%ecx)
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
+; X86-HAVE-BMI2-NO-SHLD-NEXT: movl %eax, (%ecx)
; X86-HAVE-BMI2-NO-SHLD-NEXT: addl $92, %esp
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %esi
; X86-HAVE-BMI2-NO-SHLD-NEXT: popl %edi
diff --git a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
index 4d261a9..9fbbba2 100644
--- a/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
+++ b/llvm/test/CodeGen/X86/x86-shrink-wrapping.ll
@@ -820,7 +820,7 @@ define void @infiniteloop() {
; ENABLE-NEXT: movq %rsp, %rax
; ENABLE-NEXT: addq $-16, %rax
; ENABLE-NEXT: movq %rax, %rsp
-; ENABLE-NEXT: xorl %ecx, %ecx
+; ENABLE-NEXT: xorl %ecx, %ecx
; ENABLE-NEXT: .p2align 4
; ENABLE-NEXT: LBB10_2: ## %for.body
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
@@ -851,8 +851,8 @@ define void @infiniteloop() {
; DISABLE-NEXT: ## %bb.1: ## %if.then
; DISABLE-NEXT: movq %rsp, %rax
; DISABLE-NEXT: addq $-16, %rax
-; DISABLE-NEXT: movq %rax, %rsp
-; DISABLE-NEXT: xorl %ecx, %ecx
+; DISABLE-NEXT: movq %rax, %rsp
+; DISABLE-NEXT: xorl %ecx, %ecx
; DISABLE-NEXT: .p2align 4
; DISABLE-NEXT: LBB10_2: ## %for.body
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
@@ -1185,10 +1185,10 @@ define i32 @useLEAForPrologue(i32 %d, i32 %a, i8 %c) #3 {
; ENABLE-NEXT: .p2align 4
; ENABLE-NEXT: LBB14_2: ## %for.body
; ENABLE-NEXT: ## =>This Inner Loop Header: Depth=1
-; ENABLE-NEXT: cmpl %esi, %edi
-; ENABLE-NEXT: setl %al
+; ENABLE-NEXT: movl %esi, %eax
; ENABLE-NEXT: xorl %esi, %esi
-; ENABLE-NEXT: movb %al, %sil
+; ENABLE-NEXT: cmpl %eax, %edi
+; ENABLE-NEXT: setl %sil
; ENABLE-NEXT: incb %dl
; ENABLE-NEXT: cmpb $45, %dl
; ENABLE-NEXT: jl LBB14_2
@@ -1220,10 +1220,10 @@ define i32 @useLEAForPrologue(i32 %d, i32 %a, i8 %c) #3 {
; DISABLE-NEXT: .p2align 4
; DISABLE-NEXT: LBB14_2: ## %for.body
; DISABLE-NEXT: ## =>This Inner Loop Header: Depth=1
-; DISABLE-NEXT: cmpl %esi, %edi
-; DISABLE-NEXT: setl %al
+; DISABLE-NEXT: movl %esi, %eax
; DISABLE-NEXT: xorl %esi, %esi
-; DISABLE-NEXT: movb %al, %sil
+; DISABLE-NEXT: cmpl %eax, %edi
+; DISABLE-NEXT: setl %sil
; DISABLE-NEXT: incb %dl
; DISABLE-NEXT: cmpb $45, %dl
; DISABLE-NEXT: jl LBB14_2
diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
index 2bef668..59fbf71 100644
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -62,12 +62,12 @@ define i32 @test4(i32 %a, i32 %b) nounwind {
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB3_1: # %bb
; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl %ecx, %edx
; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: notl %edx
-; X86-NEXT: andl %ecx, %edx
-; X86-NEXT: addl %edx, %edx
-; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: notl %ecx
+; X86-NEXT: andl %edx, %ecx
+; X86-NEXT: addl %ecx, %ecx
; X86-NEXT: jne .LBB3_1
; X86-NEXT: # %bb.2: # %bb12
; X86-NEXT: retl
@@ -78,12 +78,12 @@ define i32 @test4(i32 %a, i32 %b) nounwind {
; X64-LIN-NEXT: .p2align 4
; X64-LIN-NEXT: .LBB3_1: # %bb
; X64-LIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-LIN-NEXT: movl %esi, %ecx
; X64-LIN-NEXT: xorl %esi, %eax
-; X64-LIN-NEXT: movl %eax, %ecx
-; X64-LIN-NEXT: notl %ecx
-; X64-LIN-NEXT: andl %esi, %ecx
-; X64-LIN-NEXT: addl %ecx, %ecx
-; X64-LIN-NEXT: movl %ecx, %esi
+; X64-LIN-NEXT: movl %eax, %esi
+; X64-LIN-NEXT: notl %esi
+; X64-LIN-NEXT: andl %ecx, %esi
+; X64-LIN-NEXT: addl %esi, %esi
; X64-LIN-NEXT: jne .LBB3_1
; X64-LIN-NEXT: # %bb.2: # %bb12
; X64-LIN-NEXT: retq
@@ -94,12 +94,12 @@ define i32 @test4(i32 %a, i32 %b) nounwind {
; X64-WIN-NEXT: .p2align 4
; X64-WIN-NEXT: .LBB3_1: # %bb
; X64-WIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-WIN-NEXT: movl %edx, %ecx
; X64-WIN-NEXT: xorl %edx, %eax
-; X64-WIN-NEXT: movl %eax, %ecx
-; X64-WIN-NEXT: notl %ecx
-; X64-WIN-NEXT: andl %edx, %ecx
-; X64-WIN-NEXT: addl %ecx, %ecx
-; X64-WIN-NEXT: movl %ecx, %edx
+; X64-WIN-NEXT: movl %eax, %edx
+; X64-WIN-NEXT: notl %edx
+; X64-WIN-NEXT: andl %ecx, %edx
+; X64-WIN-NEXT: addl %edx, %edx
; X64-WIN-NEXT: jne .LBB3_1
; X64-WIN-NEXT: # %bb.2: # %bb12
; X64-WIN-NEXT: retq
@@ -126,13 +126,13 @@ define i16 @test5(i16 %a, i16 %b) nounwind {
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB4_1: # %bb
; X86-NEXT: # =>This Inner Loop Header: Depth=1
-; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: notl %edx
-; X86-NEXT: andl %ecx, %edx
-; X86-NEXT: addl %edx, %edx
-; X86-NEXT: testw %dx, %dx
-; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: xorl %edx, %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: notl %ecx
+; X86-NEXT: andl %edx, %ecx
+; X86-NEXT: addl %ecx, %ecx
+; X86-NEXT: testw %cx, %cx
; X86-NEXT: jne .LBB4_1
; X86-NEXT: # %bb.2: # %bb12
; X86-NEXT: # kill: def $ax killed $ax killed $eax
@@ -144,13 +144,13 @@ define i16 @test5(i16 %a, i16 %b) nounwind {
; X64-LIN-NEXT: .p2align 4
; X64-LIN-NEXT: .LBB4_1: # %bb
; X64-LIN-NEXT: # =>This Inner Loop Header: Depth=1
-; X64-LIN-NEXT: xorl %esi, %eax
-; X64-LIN-NEXT: movl %eax, %ecx
-; X64-LIN-NEXT: notl %ecx
-; X64-LIN-NEXT: andl %esi, %ecx
-; X64-LIN-NEXT: addl %ecx, %ecx
-; X64-LIN-NEXT: testw %cx, %cx
-; X64-LIN-NEXT: movl %ecx, %esi
+; X64-LIN-NEXT: movl %esi, %ecx
+; X64-LIN-NEXT: xorl %ecx, %eax
+; X64-LIN-NEXT: movl %eax, %esi
+; X64-LIN-NEXT: notl %esi
+; X64-LIN-NEXT: andl %ecx, %esi
+; X64-LIN-NEXT: addl %esi, %esi
+; X64-LIN-NEXT: testw %si, %si
; X64-LIN-NEXT: jne .LBB4_1
; X64-LIN-NEXT: # %bb.2: # %bb12
; X64-LIN-NEXT: # kill: def $ax killed $ax killed $eax
@@ -163,13 +163,13 @@ define i16 @test5(i16 %a, i16 %b) nounwind {
; X64-WIN-NEXT: .p2align 4
; X64-WIN-NEXT: .LBB4_1: # %bb
; X64-WIN-NEXT: # =>This Inner Loop Header: Depth=1
-; X64-WIN-NEXT: xorl %edx, %eax
-; X64-WIN-NEXT: movl %eax, %ecx
-; X64-WIN-NEXT: notl %ecx
-; X64-WIN-NEXT: andl %edx, %ecx
-; X64-WIN-NEXT: addl %ecx, %ecx
-; X64-WIN-NEXT: testw %cx, %cx
-; X64-WIN-NEXT: movl %ecx, %edx
+; X64-WIN-NEXT: movl %edx, %ecx
+; X64-WIN-NEXT: xorl %ecx, %eax
+; X64-WIN-NEXT: movl %eax, %edx
+; X64-WIN-NEXT: notl %edx
+; X64-WIN-NEXT: andl %ecx, %edx
+; X64-WIN-NEXT: addl %edx, %edx
+; X64-WIN-NEXT: testw %dx, %dx
; X64-WIN-NEXT: jne .LBB4_1
; X64-WIN-NEXT: # %bb.2: # %bb12
; X64-WIN-NEXT: # kill: def $ax killed $ax killed $eax
@@ -197,12 +197,12 @@ define i8 @test6(i8 %a, i8 %b) nounwind {
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB5_1: # %bb
; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl %ecx, %edx
; X86-NEXT: xorb %cl, %al
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: notb %dl
-; X86-NEXT: andb %cl, %dl
-; X86-NEXT: addb %dl, %dl
-; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: notb %cl
+; X86-NEXT: andb %dl, %cl
+; X86-NEXT: addb %cl, %cl
; X86-NEXT: jne .LBB5_1
; X86-NEXT: # %bb.2: # %bb12
; X86-NEXT: retl
@@ -213,12 +213,12 @@ define i8 @test6(i8 %a, i8 %b) nounwind {
; X64-LIN-NEXT: .p2align 4
; X64-LIN-NEXT: .LBB5_1: # %bb
; X64-LIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-LIN-NEXT: movl %esi, %ecx
; X64-LIN-NEXT: xorb %sil, %al
-; X64-LIN-NEXT: movl %eax, %ecx
-; X64-LIN-NEXT: notb %cl
-; X64-LIN-NEXT: andb %sil, %cl
-; X64-LIN-NEXT: addb %cl, %cl
-; X64-LIN-NEXT: movl %ecx, %esi
+; X64-LIN-NEXT: movl %eax, %esi
+; X64-LIN-NEXT: notb %sil
+; X64-LIN-NEXT: andb %cl, %sil
+; X64-LIN-NEXT: addb %sil, %sil
; X64-LIN-NEXT: jne .LBB5_1
; X64-LIN-NEXT: # %bb.2: # %bb12
; X64-LIN-NEXT: # kill: def $al killed $al killed $eax
@@ -230,12 +230,12 @@ define i8 @test6(i8 %a, i8 %b) nounwind {
; X64-WIN-NEXT: .p2align 4
; X64-WIN-NEXT: .LBB5_1: # %bb
; X64-WIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-WIN-NEXT: movl %edx, %ecx
; X64-WIN-NEXT: xorb %dl, %al
-; X64-WIN-NEXT: movl %eax, %ecx
-; X64-WIN-NEXT: notb %cl
-; X64-WIN-NEXT: andb %dl, %cl
-; X64-WIN-NEXT: addb %cl, %cl
-; X64-WIN-NEXT: movl %ecx, %edx
+; X64-WIN-NEXT: movl %eax, %edx
+; X64-WIN-NEXT: notb %dl
+; X64-WIN-NEXT: andb %cl, %dl
+; X64-WIN-NEXT: addb %dl, %dl
; X64-WIN-NEXT: jne .LBB5_1
; X64-WIN-NEXT: # %bb.2: # %bb12
; X64-WIN-NEXT: retq
@@ -262,12 +262,12 @@ define i32 @test7(i32 %a, i32 %b) nounwind {
; X86-NEXT: .p2align 4
; X86-NEXT: .LBB6_1: # %bb
; X86-NEXT: # =>This Inner Loop Header: Depth=1
+; X86-NEXT: movl %ecx, %edx
; X86-NEXT: xorl %ecx, %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: xorl $2147483646, %edx # imm = 0x7FFFFFFE
-; X86-NEXT: andl %ecx, %edx
-; X86-NEXT: addl %edx, %edx
-; X86-NEXT: movl %edx, %ecx
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: xorl $2147483646, %ecx # imm = 0x7FFFFFFE
+; X86-NEXT: andl %edx, %ecx
+; X86-NEXT: addl %ecx, %ecx
; X86-NEXT: jne .LBB6_1
; X86-NEXT: # %bb.2: # %bb12
; X86-NEXT: retl
@@ -278,12 +278,12 @@ define i32 @test7(i32 %a, i32 %b) nounwind {
; X64-LIN-NEXT: .p2align 4
; X64-LIN-NEXT: .LBB6_1: # %bb
; X64-LIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-LIN-NEXT: movl %esi, %ecx
; X64-LIN-NEXT: xorl %esi, %eax
-; X64-LIN-NEXT: movl %eax, %ecx
-; X64-LIN-NEXT: xorl $2147483646, %ecx # imm = 0x7FFFFFFE
-; X64-LIN-NEXT: andl %esi, %ecx
-; X64-LIN-NEXT: addl %ecx, %ecx
-; X64-LIN-NEXT: movl %ecx, %esi
+; X64-LIN-NEXT: movl %eax, %esi
+; X64-LIN-NEXT: xorl $2147483646, %esi # imm = 0x7FFFFFFE
+; X64-LIN-NEXT: andl %ecx, %esi
+; X64-LIN-NEXT: addl %esi, %esi
; X64-LIN-NEXT: jne .LBB6_1
; X64-LIN-NEXT: # %bb.2: # %bb12
; X64-LIN-NEXT: retq
@@ -294,12 +294,12 @@ define i32 @test7(i32 %a, i32 %b) nounwind {
; X64-WIN-NEXT: .p2align 4
; X64-WIN-NEXT: .LBB6_1: # %bb
; X64-WIN-NEXT: # =>This Inner Loop Header: Depth=1
+; X64-WIN-NEXT: movl %edx, %ecx
; X64-WIN-NEXT: xorl %edx, %eax
-; X64-WIN-NEXT: movl %eax, %ecx
-; X64-WIN-NEXT: xorl $2147483646, %ecx # imm = 0x7FFFFFFE
-; X64-WIN-NEXT: andl %edx, %ecx
-; X64-WIN-NEXT: addl %ecx, %ecx
-; X64-WIN-NEXT: movl %ecx, %edx
+; X64-WIN-NEXT: movl %eax, %edx
+; X64-WIN-NEXT: xorl $2147483646, %edx # imm = 0x7FFFFFFE
+; X64-WIN-NEXT: andl %ecx, %edx
+; X64-WIN-NEXT: addl %edx, %edx
; X64-WIN-NEXT: jne .LBB6_1
; X64-WIN-NEXT: # %bb.2: # %bb12
; X64-WIN-NEXT: retq
diff --git a/llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir b/llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir
index a334e99..ea01835 100644
--- a/llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir
+++ b/llvm/test/DebugInfo/MIR/X86/clobbered-fragments.mir
@@ -85,10 +85,11 @@
!15 = !DISubrange(count: 3)
!16 = !DILocation(line: 8, scope: !8)
!17 = !DILocation(line: 9, scope: !8)
- !18 = distinct !DISubprogram(name: "test2", scope: !2, file: !2, line: 7, type: !9, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !1, retainedNodes: !11)
+ !18 = distinct !DISubprogram(name: "test2", scope: !2, file: !2, line: 7, type: !9, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !1, retainedNodes: !22)
!19 = !DILocalVariable(name: "local", scope: !18, file: !2, line: 8, type: !13)
!20 = !DILocation(line: 15, scope: !18)
!21 = !DILocation(line: 16, scope: !18)
+ !22 = !{!19}
...
---
diff --git a/llvm/test/DebugInfo/MIR/X86/machine-cse.mir b/llvm/test/DebugInfo/MIR/X86/machine-cse.mir
index c38c0a1..63dc44f 100644
--- a/llvm/test/DebugInfo/MIR/X86/machine-cse.mir
+++ b/llvm/test/DebugInfo/MIR/X86/machine-cse.mir
@@ -73,13 +73,14 @@
!0 = !{i32 2, !"Debug Info Version", i32 3}
!1 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, file: !2, producer: "beards", isOptimized: true, runtimeVersion: 4, emissionKind: FullDebug)
!2 = !DIFile(filename: "bees.cpp", directory: "")
- !3 = distinct !DISubprogram(name: "nope", scope: !1, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !8)
- !33 = distinct !DISubprogram(name: "alsonope", scope: !1, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !8)
+ !3 = distinct !DISubprogram(name: "nope", scope: !1, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !9)
+ !33 = distinct !DISubprogram(name: "alsonope", scope: !1, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !9)
!4 = !DILocalVariable(name: "bees", scope: !3, type: !5)
!5 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !6, size: 64)
!6 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!7 = !DILocation(line: 0, scope: !3)
!8 = !{!4}
+ !9 = !{}
; CHECK: ![[METAVAR:[0-9]+]] = !DILocalVariable(name: "bees",
diff --git a/llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir b/llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir
index 06ce18d..28fc044 100644
--- a/llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir
+++ b/llvm/test/DebugInfo/MIR/X86/remove-redundant-dbg-vals.mir
@@ -139,15 +139,15 @@
!23 = !DISubprogram(name: "bar", scope: !1, file: !1, line: 1, type: !24, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized, retainedNodes: !2)
!24 = !DISubroutineType(types: !25)
!25 = !{null, !11}
- !26 = distinct !DISubprogram(name: "foo2", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12)
+ !26 = distinct !DISubprogram(name: "foo2", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
!27 = !DILocation(line: 0, scope: !26)
- !28 = distinct !DISubprogram(name: "foo3", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12)
+ !28 = distinct !DISubprogram(name: "foo3", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
!29 = !DILocation(line: 0, scope: !28)
- !30 = distinct !DISubprogram(name: "foo4", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12)
+ !30 = distinct !DISubprogram(name: "foo4", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
!31 = !DILocation(line: 0, scope: !30)
- !32 = distinct !DISubprogram(name: "foo5", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12)
+ !32 = distinct !DISubprogram(name: "foo5", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
!33 = !DILocation(line: 0, scope: !32)
- !34 = distinct !DISubprogram(name: "foo6", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !12)
+ !34 = distinct !DISubprogram(name: "foo6", scope: !1, file: !1, line: 4, type: !9, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
!35 = !DILocation(line: 0, scope: !34)
...
diff --git a/llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll b/llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll
index dbbef2b..594607c 100644
--- a/llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll
+++ b/llvm/test/DebugInfo/X86/instr-ref-selectiondag.ll
@@ -281,15 +281,19 @@ lala:
!11 = !{!13}
!13 = !DILocalVariable(name: "baz", scope: !7, file: !1, line: 6, type: !10)
!14 = !DILocation(line: 1, scope: !7)
-!20 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11)
+!20 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !23)
!21 = !DILocalVariable(name: "xyzzy", scope: !20, file: !1, line: 6, type: !10)
!22 = !DILocation(line: 1, scope: !20)
-!30 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11)
+!23 = !{!21}
+!30 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !33)
!31 = !DILocalVariable(name: "xyzzy", scope: !30, file: !1, line: 6, type: !10)
!32 = !DILocation(line: 1, scope: !30)
-!40 = distinct !DISubprogram(name: "qux", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11)
+!33 = !{!31}
+!40 = distinct !DISubprogram(name: "qux", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !46)
!41 = !DILocalVariable(name: "socks", scope: !40, file: !1, line: 6, type: !10)
!42 = !DILocation(line: 1, scope: !40)
-!43 = distinct !DISubprogram(name: "inlined", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !11)
+!43 = distinct !DISubprogram(name: "inlined", scope: !1, file: !1, line: 5, type: !8, scopeLine: 5, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !47)
!44 = !DILocation(line: 0, scope: !43, inlinedAt: !42)
!45 = !DILocalVariable(name: "knees", scope: !43, file: !1, line: 6, type: !10)
+!46 = !{!41}
+!47 = !{!45}
diff --git a/llvm/test/DebugInfo/X86/live-debug-values-constprop.mir b/llvm/test/DebugInfo/X86/live-debug-values-constprop.mir
index 8a05376..2900f0b 100644
--- a/llvm/test/DebugInfo/X86/live-debug-values-constprop.mir
+++ b/llvm/test/DebugInfo/X86/live-debug-values-constprop.mir
@@ -82,15 +82,18 @@
!14 = !DISubroutineType(types: !15)
!15 = !{!16}
!16 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
- !40 = distinct !DISubprogram(name: "bar", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !13, type: !14, isDefinition: true)
+ !40 = distinct !DISubprogram(name: "bar", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !43, type: !14, isDefinition: true)
!41 = !DILocalVariable(name: "towel", scope: !40, file: !2, line: 1, type: !16)
!42 = !DILocation(line: 40, scope: !40)
- !80 = distinct !DISubprogram(name: "baz", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !13, type: !14, isDefinition: true)
+ !43 = !{!41}
+ !80 = distinct !DISubprogram(name: "baz", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !83, type: !14, isDefinition: true)
!81 = !DILocalVariable(name: "socks", scope: !80, file: !2, line: 1, type: !16)
!82 = !DILocation(line: 40, scope: !80)
- !120 = distinct !DISubprogram(name: "qux", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !13, type: !14, isDefinition: true)
+ !83 = !{!81}
+ !120 = distinct !DISubprogram(name: "qux", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !123, type: !14, isDefinition: true)
!121 = !DILocalVariable(name: "shoes", scope: !120, file: !2, line: 1, type: !16)
!122 = !DILocation(line: 40, scope: !120)
+ !123 = !{!121}
...
---
diff --git a/llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll b/llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll
index e656c62..145b504 100644
--- a/llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll
+++ b/llvm/test/DebugInfo/X86/live-debug-values-remove-range.ll
@@ -108,6 +108,6 @@ exit:
!106 = !DILocation(line: 1, scope: !104)
!113 = !{!103}
!203 = !DILocalVariable(name: "teacake", scope: !204, file: !2, line: 1, type: !16)
-!204 = distinct !DISubprogram(name: "toad", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !113, type: !14, isDefinition: true)
+!204 = distinct !DISubprogram(name: "toad", scope: !2, file: !2, line: 1, spFlags: DISPFlagDefinition, unit: !1, retainedNodes: !213, type: !14, isDefinition: true)
!206 = !DILocation(line: 1, scope: !204)
!213 = !{!203}
diff --git a/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir b/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
index 3beaf89..ab57a96 100644
--- a/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
+++ b/llvm/test/DebugInfo/X86/live-debug-vars-intervals.mir
@@ -91,7 +91,7 @@
!10 = !{!11}
!11 = !DILocalVariable(name: "x", arg: 1, scope: !6, file: !1, line: 3, type: !9)
!12 = !DILocation(line: 3, column: 12, scope: !6)
- !13 = distinct !DISubprogram(name: "f2", scope: !1, file: !1, line: 20, type: !7, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !10)
+ !13 = distinct !DISubprogram(name: "f2", scope: !1, file: !1, line: 20, type: !7, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !{!14})
!14 = !DILocalVariable(name: "x", arg: 1, scope: !13, file: !1, line: 21, type: !9)
!15 = !DILocation(line: 23, column: 12, scope: !13)
diff --git a/llvm/test/Linker/thinlto_funcimport_debug.ll b/llvm/test/Linker/thinlto_funcimport_debug.ll
index 294b3a7..4454a56 100644
--- a/llvm/test/Linker/thinlto_funcimport_debug.ll
+++ b/llvm/test/Linker/thinlto_funcimport_debug.ll
@@ -80,8 +80,8 @@ attributes #1 = { nounwind readnone }
!26 = !DILocation(line: 9, column: 3, scope: !4)
!27 = distinct !DISubprogram(name: "func3", scope: !1, file: !1, line: 8, type: !5, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !28)
!28 = !{!29}
-!29 = !DILocalVariable(name: "n", arg: 1, scope: !30, file: !1, line: 8, type: !7)
+!29 = !DILocalVariable(name: "n", arg: 1, scope: !27, file: !1, line: 8, type: !33)
!30 = distinct !DISubprogram(name: "func4", scope: !1, file: !1, line: 8, type: !5, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !31)
!31 = !{!32}
!32 = !DILocalVariable(name: "n", arg: 1, scope: !30, file: !1, line: 8, type: !7)
-
+!33 = !DIDerivedType(tag: DW_TAG_typedef, name: "size_t", scope: !30, file: !1, line: 13, baseType: !7)
diff --git a/llvm/test/MC/AArch64/prfum.s b/llvm/test/MC/AArch64/prfum.s
new file mode 100644
index 0000000..81a864a
--- /dev/null
+++ b/llvm/test/MC/AArch64/prfum.s
@@ -0,0 +1,44 @@
+// RUN: llvm-mc -triple=aarch64 -show-encoding --print-imm-hex=false < %s \
+// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+// RUN: llvm-mc -triple=aarch64 -filetype=obj < %s \
+// RUN: | llvm-objdump -d --print-imm-hex=false - | FileCheck %s --check-prefix=CHECK-INST
+// Disassemble the encoding and check that the re-encoding (-show-encoding)
+// matches.
+// RUN: llvm-mc -triple=aarch64 -show-encoding < %s \
+// RUN: | sed '/.text/d' | sed 's/.*encoding: //g' \
+// RUN: | llvm-mc -triple=aarch64 -disassemble -show-encoding --print-imm-hex=false \
+// RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+
+// PRFM falls back to PRFUM when the offset is negative or unaligned (not a
+// multiple of 8).
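+//
+// A minimal sketch of the selection rule these cases assume (not lifted from
+// the AArch64 backend sources): the scaled PRFM form encodes offset/8 in an
+// unsigned 12-bit field, covering only 0, 8, ..., 32760, while the unscaled
+// PRFUM form takes a signed 9-bit byte offset in the range -256..255:
+//
+//   bool usesUnscaledForm(int64_t Off) { // hypothetical helper, for clarity
+//     return Off < 0 || (Off % 8) != 0; // e.g. #-8 and #1 -> PRFUM
+//   }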
+
+prfm pldl1keep, [x0, #-256]
+// CHECK-INST: prfum pldl1keep, [x0, #-256]
+// CHECK-ENCODING: [0x00,0x00,0x90,0xf8]
+
+prfm pldl1keep, [x0, #-8]
+// CHECK-INST: prfum pldl1keep, [x0, #-8]
+// CHECK-ENCODING: [0x00,0x80,0x9f,0xf8]
+
+prfm pldl1keep, [x0, #-1]
+// CHECK-INST: prfum pldl1keep, [x0, #-1]
+// CHECK-ENCODING: [0x00,0xf0,0x9f,0xf8]
+
+prfm pldl1keep, [x0, #0]
+// CHECK-INST: prfm pldl1keep, [x0]
+// CHECK-ENCODING: [0x00,0x00,0x80,0xf9]
+
+prfm pldl1keep, [x0, #1]
+// CHECK-INST: prfum pldl1keep, [x0, #1]
+// CHECK-ENCODING: [0x00,0x10,0x80,0xf8]
+
+prfm pldl1keep, [x0, #8]
+// CHECK-INST: prfm pldl1keep, [x0, #8]
+// CHECK-ENCODING: [0x00,0x04,0x80,0xf9]
+
+prfm pldl1keep, [x0, #255]
+// CHECK-INST: prfum pldl1keep, [x0, #255]
+// CHECK-ENCODING: [0x00,0xf0,0x8f,0xf8]
+
+prfm pldl1keep, [x0, #256]
+// CHECK-INST: prfm pldl1keep, [x0, #256]
+// CHECK-ENCODING: [0x00,0x80,0x80,0xf9]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s
index c393d3e..3f6d8fe 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1_err.s
@@ -34,3 +34,83 @@ v_cvt_f32_bf16 v5, v1 div:2
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX1250-ERR-NEXT:{{^}}v_cvt_f32_bf16 v5, v1 div:2
// GFX1250-ERR-NEXT:{{^}} ^
+
+v_cos_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_cos_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_cos_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_cos_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_exp_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_exp_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_exp_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_exp_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_log_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_log_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_log_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_log_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_rcp_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_rcp_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_rcp_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_rcp_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_rsq_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_rsq_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_rsq_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_rsq_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_sin_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_sin_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_sin_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_sin_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_sqrt_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_sqrt_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_sqrt_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_sqrt_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_tanh_bf16 v1, v2 clamp
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR-NEXT:{{^}}v_tanh_bf16 v1, v2 clamp
+// GFX1250-ERR-NEXT:{{^}} ^
+
+v_tanh_bf16 v1, v2 mul:2
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR-NEXT:{{^}}v_tanh_bf16 v1, v2 mul:2
+// GFX1250-ERR-NEXT:{{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
index 0931523..37ad6eb 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
@@ -3781,15 +3781,6 @@ v_tanh_bf16_e64 v5, null
v_tanh_bf16_e64 v5, -1
// GFX1250: v_tanh_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_tanh_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-
-v_tanh_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_tanh_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-
-v_tanh_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_prng_b32_e64 v5, v1
// GFX1250: v_prng_b32_e64 v5, v1 ; encoding: [0x05,0x00,0xcb,0xd5,0x01,0x01,0x00,0x00]
@@ -3862,15 +3853,6 @@ v_rcp_bf16_e64 v5, null
v_rcp_bf16_e64 v5, -1
// GFX1250: v_rcp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_rcp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-
-v_rcp_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_rcp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-
-v_rcp_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_sqrt_bf16_e64 v5, v1
// GFX1250: v_sqrt_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfa,0xd5,0x01,0x01,0x00,0x00]
@@ -3907,15 +3889,6 @@ v_sqrt_bf16_e64 v5, null
v_sqrt_bf16_e64 v5, -1
// GFX1250: v_sqrt_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_sqrt_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-
-v_sqrt_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_sqrt_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-
-v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_rsq_bf16_e64 v5, v1
// GFX1250: v_rsq_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfb,0xd5,0x01,0x01,0x00,0x00]
@@ -3952,15 +3925,6 @@ v_rsq_bf16_e64 v5, null
v_rsq_bf16_e64 v5, -1
// GFX1250: v_rsq_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_rsq_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-
-v_rsq_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_rsq_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-
-v_rsq_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_log_bf16_e64 v5, v1
// GFX1250: v_log_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfc,0xd5,0x01,0x01,0x00,0x00]
@@ -3997,15 +3961,6 @@ v_log_bf16_e64 v5, null
v_log_bf16_e64 v5, -1
// GFX1250: v_log_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
-v_log_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_log_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-
-v_log_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_log_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-
-v_log_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_log_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_exp_bf16_e64 v5, v1
// GFX1250: v_exp_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfd,0xd5,0x01,0x01,0x00,0x00]
@@ -4042,15 +3997,6 @@ v_exp_bf16_e64 v5, null
v_exp_bf16_e64 v5, -1
// GFX1250: v_exp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
-v_exp_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_exp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-
-v_exp_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_exp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-
-v_exp_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_exp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_sin_bf16_e64 v5, v1
// GFX1250: v_sin_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xfe,0xd5,0x01,0x01,0x00,0x00]
@@ -4087,15 +4033,6 @@ v_sin_bf16_e64 v5, null
v_sin_bf16_e64 v5, -1
// GFX1250: v_sin_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
-v_sin_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_sin_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-
-v_sin_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_sin_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-
-v_sin_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_sin_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_cos_bf16_e64 v5, v1
// GFX1250: v_cos_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xff,0xd5,0x01,0x01,0x00,0x00]
@@ -4132,15 +4069,6 @@ v_cos_bf16_e64 v5, null
v_cos_bf16_e64 v5, -1
// GFX1250: v_cos_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
-v_cos_bf16_e64 v5, 0.5 mul:2
-// GFX1250: v_cos_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-
-v_cos_bf16_e64 v5, src_scc mul:4
-// GFX1250: v_cos_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-
-v_cos_bf16_e64 v255, -|0x8000| clamp div:2
-// GFX1250: v_cos_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_cvt_f32_bf16_e64 v5, v1
// GFX1250: v_cvt_f32_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
index 5ac9eb4..52f9ba3 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
@@ -3952,15 +3952,6 @@ v_tanh_bf16_e64 v5.l, null
v_tanh_bf16_e64 v5.l, -1
// GFX1250: v_tanh_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
-v_tanh_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_tanh_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-
-v_tanh_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_tanh_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-
-v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_tanh_bf16 v5.l, v128.h
// GFX1250: v_tanh_bf16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0xca,0xd5,0x80,0x01,0x00,0x00]
@@ -4036,15 +4027,6 @@ v_rcp_bf16_e64 v5.l, null
v_rcp_bf16_e64 v5.l, -1
// GFX1250: v_rcp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
-v_rcp_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_rcp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-
-v_rcp_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_rcp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-
-v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_rcp_bf16 v5.h, v128.h
// GFX1250: v_rcp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xf9,0xd5,0x80,0x01,0x00,0x00]
@@ -4084,15 +4066,6 @@ v_sqrt_bf16_e64 v5.l, null
v_sqrt_bf16_e64 v5.l, -1
// GFX1250: v_sqrt_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
-v_sqrt_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_sqrt_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-
-v_sqrt_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_sqrt_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-
-v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_sqrt_bf16 v5.h, v128.h
// GFX1250: v_sqrt_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfa,0xd5,0x80,0x01,0x00,0x00]
@@ -4132,15 +4105,6 @@ v_rsq_bf16_e64 v5.l, null
v_rsq_bf16_e64 v5.l, -1
// GFX1250: v_rsq_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
-v_rsq_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_rsq_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-
-v_rsq_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_rsq_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-
-v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_rsq_bf16 v5.h, v128.h
// GFX1250: v_rsq_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfb,0xd5,0x80,0x01,0x00,0x00]
@@ -4180,15 +4144,6 @@ v_log_bf16_e64 v5.l, null
v_log_bf16_e64 v5.l, -1
// GFX1250: v_log_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
-v_log_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_log_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-
-v_log_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_log_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-
-v_log_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_log_bf16 v5.h, v128.h
// GFX1250: v_log_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfc,0xd5,0x80,0x01,0x00,0x00]
@@ -4228,15 +4183,6 @@ v_exp_bf16_e64 v5.l, null
v_exp_bf16_e64 v5.l, -1
// GFX1250: v_exp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
-v_exp_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_exp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-
-v_exp_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_exp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-
-v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_exp_bf16 v5.h, v128.h
// GFX1250: v_exp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfd,0xd5,0x80,0x01,0x00,0x00]
@@ -4276,15 +4222,6 @@ v_sin_bf16_e64 v5.l, null
v_sin_bf16_e64 v5.l, -1
// GFX1250: v_sin_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
-v_sin_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_sin_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-
-v_sin_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_sin_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-
-v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_sin_bf16 v5.h, v128.h
// GFX1250: v_sin_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfe,0xd5,0x80,0x01,0x00,0x00]
@@ -4324,15 +4261,6 @@ v_cos_bf16_e64 v5.l, null
v_cos_bf16_e64 v5.l, -1
// GFX1250: v_cos_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
-v_cos_bf16_e64 v5.l, 0.5 mul:2
-// GFX1250: v_cos_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-
-v_cos_bf16_e64 v5.l, src_scc mul:4
-// GFX1250: v_cos_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-
-v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2
-// GFX1250: v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
v_cos_bf16_e64 v5.h, v128.h
// GFX1250: v_cos_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xff,0xd5,0x80,0x01,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
index b21fca6..21077fe 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
@@ -158,18 +158,6 @@ v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_tanh_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_prng_b32_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_prng_b32_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -258,18 +246,6 @@ v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_rcp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -314,18 +290,6 @@ v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -370,18 +334,6 @@ v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_rsq_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -426,18 +378,6 @@ v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_log_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -482,18 +422,6 @@ v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_exp_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -538,18 +466,6 @@ v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_sin_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -594,18 +510,6 @@ v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_cos_bf16_e64_dpp v5, v1 row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0]
// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf2,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
index d163856..646acf5 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
@@ -162,18 +162,6 @@ v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_tanh_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_tanh_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xca,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -266,18 +254,6 @@ v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rcp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -326,18 +302,6 @@ v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sqrt_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -386,18 +350,6 @@ v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rsq_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -446,18 +398,6 @@ v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_log_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -506,18 +446,6 @@ v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_exp_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -566,18 +494,6 @@ v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sin_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -626,18 +542,6 @@ v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf
// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0x50,0x01,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cos_bf16_e64_dpp v5.h, v128.h quad_perm:[3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xff,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
index 78afa10b..1907a93 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
@@ -38,18 +38,6 @@ v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -58,114 +46,30 @@ v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_f32_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf2,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
index 6ec4d5f..35a51db 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
@@ -42,18 +42,6 @@ v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_tanh_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_tanh_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xca,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -66,18 +54,6 @@ v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rcp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -86,18 +62,6 @@ v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sqrt_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -106,18 +70,6 @@ v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_rsq_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -126,18 +78,6 @@ v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_log_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -146,18 +86,6 @@ v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_exp_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -166,18 +94,6 @@ v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_sin_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
@@ -186,18 +102,6 @@ v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX1250: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
-v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX1250: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
-
v_cos_bf16_e64_dpp v5.h, v128.h dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xff,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
index 67747a6..0b39397 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
@@ -4123,18 +4123,10 @@
# GFX1250-REAL16: v_tanh_f16_e64 v5.l, v128.h op_sel:[1,0] ; encoding: [0x05,0x08,0x9f,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_tanh_f16_e64 v5, v128 ; encoding: [0x05,0x00,0x9f,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_tanh_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_tanh_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xca,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_tanh_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xca,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_tanh_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xca,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_tanh_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xca,0xd5,0x7f,0x00,0x00,0x00]
@@ -4159,10 +4151,6 @@
# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_tanh_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xca,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_tanh_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xca,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_tanh_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_tanh_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xca,0xd5,0x7b,0x00,0x00,0x00]
@@ -4223,18 +4211,10 @@
0x05,0x00,0xcb,0xd5,0x6a,0x00,0x00,0x00
# GFX1250: v_prng_b32_e64 v5, vcc_lo ; encoding: [0x05,0x00,0xcb,0xd5,0x6a,0x00,0x00,0x00]
-0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_rcp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_rcp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xf9,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf9,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_rcp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xf9,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xf9,0xd5,0x7f,0x00,0x00,0x00]
@@ -4259,10 +4239,6 @@
# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xf9,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_rcp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xf9,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_rcp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xf9,0xd5,0x7b,0x00,0x00,0x00]
@@ -4287,18 +4263,10 @@
# GFX1250-REAL16: v_rcp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xf9,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_rcp_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xf9,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_sqrt_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_sqrt_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfa,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfa,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfa,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfa,0xd5,0x7f,0x00,0x00,0x00]
@@ -4323,10 +4291,6 @@
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfa,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfa,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfa,0xd5,0x7b,0x00,0x00,0x00]
@@ -4351,18 +4315,10 @@
# GFX1250-REAL16: v_sqrt_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfa,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_sqrt_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfa,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_rsq_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_rsq_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfb,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfb,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_rsq_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfb,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfb,0xd5,0x7f,0x00,0x00,0x00]
@@ -4387,10 +4343,6 @@
# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfb,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_rsq_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfb,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_rsq_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfb,0xd5,0x7b,0x00,0x00,0x00]
@@ -4415,18 +4367,10 @@
# GFX1250-REAL16: v_rsq_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfb,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_rsq_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfb,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_log_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_log_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfc,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_log_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfc,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_log_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_log_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfc,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_log_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfc,0xd5,0x7f,0x00,0x00,0x00]
@@ -4451,10 +4395,6 @@
# GFX1250-REAL16: v_log_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfc,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_log_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_log_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfc,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_log_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfc,0xd5,0x7b,0x00,0x00,0x00]
@@ -4479,18 +4419,10 @@
# GFX1250-REAL16: v_log_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfc,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_log_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfc,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_exp_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_exp_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfd,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_exp_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfd,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_exp_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_exp_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfd,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_exp_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfd,0xd5,0x7f,0x00,0x00,0x00]
@@ -4515,10 +4447,6 @@
# GFX1250-REAL16: v_exp_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfd,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_exp_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_exp_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfd,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_exp_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfd,0xd5,0x7b,0x00,0x00,0x00]
@@ -4543,18 +4471,10 @@
# GFX1250-REAL16: v_exp_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfd,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_exp_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfd,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_sin_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_sin_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xfe,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_sin_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xfe,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_sin_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_sin_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xfe,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_sin_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xfe,0xd5,0x7f,0x00,0x00,0x00]
@@ -4579,10 +4499,6 @@
# GFX1250-REAL16: v_sin_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xfe,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_sin_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_sin_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xfe,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_sin_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xfe,0xd5,0x7b,0x00,0x00,0x00]
@@ -4607,18 +4523,10 @@
# GFX1250-REAL16: v_sin_bf16_e64 v5.h, v128.h op_sel:[1,1] ; encoding: [0x05,0x48,0xfe,0xd5,0x80,0x01,0x00,0x00]
# GFX1250-FAKE16: v_sin_bf16_e64 v5, v128 ; encoding: [0x05,0x00,0xfe,0xd5,0x80,0x01,0x00,0x00]
-0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00
-# GFX1250-REAL16: v_cos_bf16_e64 v255.l, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-# GFX1250-FAKE16: v_cos_bf16_e64 v255, -|0x8000| clamp div:2 ; encoding: [0xff,0x81,0xff,0xd5,0xff,0x00,0x00,0x38,0x00,0x80,0x00,0x00]
-
0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00
# GFX1250-REAL16: v_cos_bf16_e64 v5.l, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
# GFX1250-FAKE16: v_cos_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xff,0xd5,0xc1,0x00,0x00,0x00]
-0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08
-# GFX1250-REAL16: v_cos_bf16_e64 v5.l, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-# GFX1250-FAKE16: v_cos_bf16_e64 v5, 0.5 mul:2 ; encoding: [0x05,0x00,0xff,0xd5,0xf0,0x00,0x00,0x08]
-
0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00
# GFX1250-REAL16: v_cos_bf16_e64 v5.l, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00]
# GFX1250-FAKE16: v_cos_bf16_e64 v5, exec_hi ; encoding: [0x05,0x00,0xff,0xd5,0x7f,0x00,0x00,0x00]
@@ -4643,10 +4551,6 @@
# GFX1250-REAL16: v_cos_bf16_e64 v5.l, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00]
# GFX1250-FAKE16: v_cos_bf16_e64 v5, s105 ; encoding: [0x05,0x00,0xff,0xd5,0x69,0x00,0x00,0x00]
-0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10
-# GFX1250-REAL16: v_cos_bf16_e64 v5.l, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-# GFX1250-FAKE16: v_cos_bf16_e64 v5, src_scc mul:4 ; encoding: [0x05,0x00,0xff,0xd5,0xfd,0x00,0x00,0x10]
-
0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00
# GFX1250-REAL16: v_cos_bf16_e64 v5.l, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00]
# GFX1250-FAKE16: v_cos_bf16_e64 v5, ttmp15 ; encoding: [0x05,0x00,0xff,0xd5,0x7b,0x00,0x00,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
index 7c29f8a..8b26d2a 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
@@ -104,18 +104,6 @@
# GFX1250-REAL16: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_tanh_f16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x9f,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xca,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xca,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -197,18 +185,6 @@
0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff
# GFX1250: v_prng_b32_e64_dpp v5, v1 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xcb,0xd5,0xfa,0x00,0x00,0x00,0x01,0x1f,0x01,0xff]
-0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xf9,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -257,18 +233,6 @@
# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xf9,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfa,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -317,18 +281,6 @@
# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfa,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfb,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -377,18 +329,6 @@
# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfb,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfc,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -437,18 +377,6 @@
# GFX1250-REAL16: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfc,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfd,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -497,18 +425,6 @@
# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfd,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xfe,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
@@ -557,18 +473,6 @@
# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x48,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v128 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xfe,0xd5,0xfa,0x00,0x00,0x00,0x80,0x1b,0x00,0xff]
-0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x81,0xff,0xd5,0xfa,0x00,0x00,0x38,0xff,0x6f,0x05,0x30]
-
-0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x08,0x01,0x5f,0x01,0x01]
-
-0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x10,0x01,0x60,0x09,0x13]
-
0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff
# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0xff,0xd5,0xfa,0x00,0x00,0x00,0x01,0xe4,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
index d26bc46..15f76c5 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
@@ -34,22 +34,10 @@
# GFX1250-REAL16: v_tanh_f16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_tanh_f16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x9f,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xca,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xca,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xca,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_tanh_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xca,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_tanh_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xca,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
@@ -57,142 +45,58 @@
0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250: v_prng_b32_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xcb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xf9,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xf9,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_rcp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_rcp_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xf9,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfa,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfa,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_sqrt_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_sqrt_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfa,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfb,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfb,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_rsq_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_rsq_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfb,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_log_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfc,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_log_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfc,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_log_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_log_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfc,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfd,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfd,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_exp_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_exp_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfd,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xfe,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xfe,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_sin_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_sin_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xfe,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
-0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v255.l, -|v255.l| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v255, -|v255| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x81,0xff,0xd5,0xe9,0x00,0x00,0x38,0xff,0x00,0x00,0x00]
-
0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05
# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x01,0x77,0x39,0x05]
-0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x08,0x01,0x77,0x39,0x05]
-
-0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05
-# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.l, v1.l mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v1 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x00,0xff,0xd5,0xea,0x00,0x00,0x10,0x01,0x77,0x39,0x05]
-
0x05,0x48,0xff,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_cos_bf16_e64_dpp v5.h, v128.h op_sel:[1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x48,0xff,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_cos_bf16_e64_dpp v5, v128 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0xff,0xd5,0xe9,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
diff --git a/llvm/test/MC/RISCV/attribute-arch.s b/llvm/test/MC/RISCV/attribute-arch.s
index 111616d..e41c9ea 100644
--- a/llvm/test/MC/RISCV/attribute-arch.s
+++ b/llvm/test/MC/RISCV/attribute-arch.s
@@ -348,6 +348,9 @@
.attribute arch, "rv32i_smepmp1p0"
# CHECK: attribute 5, "rv32i2p1_smepmp1p0"
+.attribute arch, "rv32i_smpmpmt0p6"
+# CHECK: attribute 5, "rv32i2p1_smpmpmt0p6"
+
.attribute arch, "rv32i_smrnmi1p0"
# CHECK: attribute 5, "rv32i2p1_smrnmi1p0"
diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll
index 62975a3..b59d4cf6 100644
--- a/llvm/test/Other/new-pm-defaults.ll
+++ b/llvm/test/Other/new-pm-defaults.ll
@@ -261,6 +261,7 @@
; CHECK-O-NEXT: Running analysis: LoopAccessAnalysis on foo
; CHECK-O-NEXT: Running pass: InjectTLIMappings
; CHECK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-DEFAULT-NEXT: Running pass: DropUnnecessaryAssumesPass
; CHECK-O-NEXT: Running pass: InferAlignmentPass
; CHECK-O-NEXT: Running pass: LoopLoadEliminationPass
; CHECK-O-NEXT: Running pass: InstCombinePass
diff --git a/llvm/test/Other/new-pm-lto-defaults.ll b/llvm/test/Other/new-pm-lto-defaults.ll
index f595dfe..c865d77 100644
--- a/llvm/test/Other/new-pm-lto-defaults.ll
+++ b/llvm/test/Other/new-pm-lto-defaults.ll
@@ -129,6 +129,7 @@
; CHECK-O23SZ-NEXT: Running analysis: LoopAccessAnalysis on foo
; CHECK-O23SZ-NEXT: Running pass: LoopVectorizePass on foo
; CHECK-O23SZ-NEXT: Running analysis: DemandedBitsAnalysis on foo
+; CHECK-O23SZ-NEXT: Running pass: DropUnnecessaryAssumesPass on foo
; CHECK-O23SZ-NEXT: Running pass: InferAlignmentPass on foo
; CHECK-O23SZ-NEXT: Running pass: LoopUnrollPass on foo
; CHECK-O23SZ-NEXT: WarnMissedTransformationsPass on foo
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
index 012a1ab..c1d8b42 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-defaults.ll
@@ -180,6 +180,7 @@
; CHECK-POSTLINK-O-NEXT: Running analysis: LoopAccessAnalysis on foo
; CHECK-POSTLINK-O-NEXT: Running pass: InjectTLIMappings
; CHECK-POSTLINK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-POSTLINK-O-NEXT: Running pass: DropUnnecessaryAssumesPass
; CHECK-POSTLINK-O-NEXT: Running pass: InferAlignmentPass
; CHECK-POSTLINK-O-NEXT: Running pass: LoopLoadEliminationPass
; CHECK-POSTLINK-O-NEXT: Running pass: InstCombinePass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
index e021ff3..45f0902 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-pgo-defaults.ll
@@ -165,6 +165,7 @@
; CHECK-O-NEXT: Running analysis: LoopAccessAnalysis on foo
; CHECK-O-NEXT: Running pass: InjectTLIMappings
; CHECK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-O-NEXT: Running pass: DropUnnecessaryAssumesPass
; CHECK-O-NEXT: Running pass: InferAlignmentPass
; CHECK-O-NEXT: Running pass: LoopLoadEliminationPass
; CHECK-O-NEXT: Running pass: InstCombinePass
diff --git a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
index 20f94bc..4c330f4 100644
--- a/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
+++ b/llvm/test/Other/new-pm-thinlto-postlink-samplepgo-defaults.ll
@@ -174,6 +174,7 @@
; CHECK-O-NEXT: Running analysis: LoopAccessAnalysis
; CHECK-O-NEXT: Running pass: InjectTLIMappings
; CHECK-O-NEXT: Running pass: LoopVectorizePass
+; CHECK-O-NEXT: Running pass: DropUnnecessaryAssumesPass
; CHECK-O-NEXT: Running pass: InferAlignmentPass
; CHECK-O-NEXT: Running pass: LoopLoadEliminationPass
; CHECK-O-NEXT: Running pass: InstCombinePass
diff --git a/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll b/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
index eb2fb4f..ab01bbf 100644
--- a/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
+++ b/llvm/test/Transforms/CodeExtractor/PartialInlineDebug.ll
@@ -96,11 +96,11 @@ entry:
!13 = !DILocalVariable(name: "v", arg: 1, scope: !8, file: !1, line: 3, type: !11)
!14 = !DILocation(line: 5, column: 10, scope: !8)
!15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 9, column: 7)
-!16 = distinct !DISubprogram(name: "callee", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!16 = distinct !DISubprogram(name: "callee", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!17 = !DILocation(line: 10, column: 7, scope: !15)
-!18 = distinct !DISubprogram(name: "callee2", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!18 = distinct !DISubprogram(name: "callee2", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!19 = distinct !DILexicalBlock(scope: !18, file: !1, line: 100, column: 1)
!20 = !DILocation(line: 110, column: 17, scope: !19)
-!21 = distinct !DISubprogram(name: "caller2", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!21 = distinct !DISubprogram(name: "caller2", scope: !1, file: !1, line: 8, type: !9, isLocal: false, isDefinition: true, scopeLine: 8, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!22 = !DILocation(line: 110, column: 17, scope: !21)
!23 = !DILocation(line: 15, column: 7, scope: !15)
diff --git a/llvm/test/Transforms/DropUnnecessaryAssumes/dereferenceable.ll b/llvm/test/Transforms/DropUnnecessaryAssumes/dereferenceable.ll
new file mode 100644
index 0000000..43fa08c
--- /dev/null
+++ b/llvm/test/Transforms/DropUnnecessaryAssumes/dereferenceable.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -passes='drop-unnecessary-assumes' -S %s | FileCheck %s
+; RUN: opt -passes='drop-unnecessary-assumes<drop-deref>' -S %s | FileCheck --check-prefix=DROP-DEREF %s
+
+declare void @use(ptr)
+
+define i8 @test_dereferenceable_assume_ptr_not_used(ptr %p, i64 %size) {
+; CHECK-LABEL: define i8 @test_dereferenceable_assume_ptr_not_used(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[SIZE:%.*]]) {
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[SIZE]]) ]
+; CHECK-NEXT: ret i8 0
+;
+; DROP-DEREF-LABEL: define i8 @test_dereferenceable_assume_ptr_not_used(
+; DROP-DEREF-SAME: ptr [[P:%.*]], i64 [[SIZE:%.*]]) {
+; DROP-DEREF-NEXT: ret i8 0
+;
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %p, i64 %size) ]
+ ret i8 0
+}
+
+define i8 @test_dereferenceable_assume_ptr_used_variable_size(ptr %p, i64 %size) {
+; CHECK-LABEL: define i8 @test_dereferenceable_assume_ptr_used_variable_size(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[SIZE:%.*]]) {
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[SIZE]]) ]
+; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[P]], align 1
+; CHECK-NEXT: ret i8 [[VAL]]
+;
+; DROP-DEREF-LABEL: define i8 @test_dereferenceable_assume_ptr_used_variable_size(
+; DROP-DEREF-SAME: ptr [[P:%.*]], i64 [[SIZE:%.*]]) {
+; DROP-DEREF-NEXT: [[VAL:%.*]] = load i8, ptr [[P]], align 1
+; DROP-DEREF-NEXT: ret i8 [[VAL]]
+;
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %p, i64 %size) ]
+ %val = load i8, ptr %p
+ ret i8 %val
+}
+
+define i8 @test_dereferenceable_with_align_ptr_used(ptr %p, i64 %size) {
+; CHECK-LABEL: define i8 @test_dereferenceable_with_align_ptr_used(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[SIZE:%.*]]) {
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[SIZE]]), "align"(ptr [[P]], i64 8) ]
+; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[P]], align 1
+; CHECK-NEXT: ret i8 [[VAL]]
+;
+; DROP-DEREF-LABEL: define i8 @test_dereferenceable_with_align_ptr_used(
+; DROP-DEREF-SAME: ptr [[P:%.*]], i64 [[SIZE:%.*]]) {
+; DROP-DEREF-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[P]], i64 8) ]
+; DROP-DEREF-NEXT: [[VAL:%.*]] = load i8, ptr [[P]], align 1
+; DROP-DEREF-NEXT: ret i8 [[VAL]]
+;
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %p, i64 %size), "align"(ptr %p, i64 8) ]
+ %val = load i8, ptr %p
+ ret i8 %val
+}
diff --git a/llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll b/llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll
index da6c19d..76406dd 100644
--- a/llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll
+++ b/llvm/test/Transforms/HotColdSplit/split-out-dbg-label.ll
@@ -66,7 +66,7 @@ define void @inline_me() !dbg !13 {
!10 = !DIBasicType(name: "ty32", size: 32, encoding: DW_ATE_unsigned)
!11 = !DILocation(line: 1, column: 1, scope: !6)
!12 = !DILabel(scope: !6, name: "bye", file: !1, line: 28)
-!13 = distinct !DISubprogram(name: "inline_me", linkageName: "inline_me", scope: null, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !8)
+!13 = distinct !DISubprogram(name: "inline_me", linkageName: "inline_me", scope: null, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
!14 = !DILabel(scope: !13, name: "label_in_@inline_me", file: !1, line: 29)
!15 = !DILocation(line: 2, column: 2, scope: !13, inlinedAt: !11)
!16 = !DILabel(scope: !17, name: "scoped_label_in_foo", file: !1, line: 30)
diff --git a/llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll b/llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll
index 3f69f0c..f9dd9eaf 100644
--- a/llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll
+++ b/llvm/test/Transforms/HotColdSplit/transfer-debug-info.ll
@@ -106,7 +106,7 @@ define void @inline_me() !dbg !12{
!9 = !DILocalVariable(name: "1", scope: !6, file: !1, line: 1, type: !10)
!10 = !DIBasicType(name: "ty32", size: 32, encoding: DW_ATE_unsigned)
!11 = !DILocation(line: 1, column: 1, scope: !6)
-!12 = distinct !DISubprogram(name: "inline_me", linkageName: "inline_me", scope: null, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !8)
+!12 = distinct !DISubprogram(name: "inline_me", linkageName: "inline_me", scope: null, file: !1, line: 1, type: !7, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: true, unit: !0, retainedNodes: !2)
!13 = !DILocation(line: 2, column: 2, scope: !12, inlinedAt: !14)
!14 = !DILocation(line: 3, column: 3, scope: !15)
!15 = distinct !DILexicalBlock(scope: !16, file: !1, line: 4, column: 4)
diff --git a/llvm/test/Transforms/InstCombine/debuginfo-dce.ll b/llvm/test/Transforms/InstCombine/debuginfo-dce.ll
index c1d7c30..ec90779 100644
--- a/llvm/test/Transforms/InstCombine/debuginfo-dce.ll
+++ b/llvm/test/Transforms/InstCombine/debuginfo-dce.ll
@@ -125,15 +125,15 @@ attributes #1 = { nounwind readnone }
!19 = !DILocation(line: 6, column: 17, scope: !14)
!20 = !DIExpression(DW_OP_plus_uconst, 0)
!21 = !DILocation(line: 11, column: 1, scope: !14)
-!22 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !17)
+!22 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!23 = !DILocation(line: 6, column: 17, scope: !22)
!24 = !DILocalVariable(name: "entry", scope: !22, file: !1, line: 6, type: !4)
-!25 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !17)
+!25 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!26 = !DILocation(line: 6, column: 17, scope: !25)
!27 = !DILocalVariable(name: "entry", scope: !25, file: !1, line: 6, type: !4)
-!28 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !17)
+!28 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!29 = !DILocation(line: 6, column: 17, scope: !28)
!30 = !DILocalVariable(name: "entry", scope: !28, file: !1, line: 6, type: !4)
-!31 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !17)
+!31 = distinct !DISubprogram(name: "scan", scope: !1, file: !1, line: 4, type: !15, isLocal: false, isDefinition: true, scopeLine: 5, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!32 = !DILocation(line: 6, column: 17, scope: !31)
!33 = !DILocalVariable(name: "entry", scope: !31, file: !1, line: 6, type: !4)
diff --git a/llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll b/llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll
index 437e566..fa83575 100644
--- a/llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll
+++ b/llvm/test/Transforms/LCSSA/rewrite-existing-dbg-values.ll
@@ -131,7 +131,8 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
!10 = !DILexicalBlockFile(scope: !6, file: !1, discriminator: 0)
!11 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
!12 = !DILocation(line: 0, scope: !10)
-!13 = distinct !DISubprogram(name: "multi_exit", scope: !1, file: !1, line: 10, type: !7, scopeLine: 10, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !8)
+!13 = distinct !DISubprogram(name: "multi_exit", scope: !1, file: !1, line: 10, type: !7, scopeLine: 10, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !17)
!14 = !DILocation(line: 0, scope: !15)
!15 = !DILexicalBlockFile(scope: !13, file: !1, discriminator: 0)
!16 = !DILocalVariable(name: "sum2", scope: !15, file: !1, line: 11, type: !11)
+!17 = !{!16}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
index 9003072..dd347a7 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AArch64/vscale-fixups.ll
@@ -19,9 +19,8 @@ define void @mulvl123_addressing(ptr %src, ptr %dst, i64 %count) #0 {
; COMMON-NEXT: ldr z3, [x0, #3, mul vl]
; COMMON-NEXT: addvl x0, x0, #5
; COMMON-NEXT: umax z0.b, p0/m, z0.b, z1.b
-; COMMON-NEXT: movprfx z1, z2
-; COMMON-NEXT: umax z1.b, p0/m, z1.b, z3.b
-; COMMON-NEXT: umax z0.b, p0/m, z0.b, z1.b
+; COMMON-NEXT: umax z2.b, p0/m, z2.b, z3.b
+; COMMON-NEXT: umax z0.b, p0/m, z0.b, z2.b
; COMMON-NEXT: st1b { z0.b }, p0, [x1, x8]
; COMMON-NEXT: incb x8
; COMMON-NEXT: cmp x8, x2
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
index c12d813..082b876 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
@@ -234,16 +234,17 @@ define void @extrastride(ptr nocapture %main, i32 %main_stride, ptr nocapture %r
; X32-NEXT: .p2align 4
; X32-NEXT: .LBB2_2: # %for.body
; X32-NEXT: # =>This Inner Loop Header: Depth=1
-; X32-NEXT: movl (%ebx,%esi), %ebp
-; X32-NEXT: addl (%ebx), %ebp
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: addl (%esi,%ebx), %ebp
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: addl (%esi,%ebx), %ebp
-; X32-NEXT: addl %esi, %ebx
-; X32-NEXT: addl (%esi,%ebx), %ebp
-; X32-NEXT: movl %ebp, (%edx)
-; X32-NEXT: addl %esi, %ebx
+; X32-NEXT: movl %ebx, %ebp
+; X32-NEXT: movl (%ebx,%esi), %ebx
+; X32-NEXT: addl (%ebp), %ebx
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: addl (%esi,%ebp), %ebx
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: addl (%esi,%ebp), %ebx
+; X32-NEXT: addl %esi, %ebp
+; X32-NEXT: addl (%esi,%ebp), %ebx
+; X32-NEXT: movl %ebx, (%edx)
+; X32-NEXT: leal (%ebp,%esi), %ebx
; X32-NEXT: addl %edi, %ebx
; X32-NEXT: addl %ecx, %edx
; X32-NEXT: decl %eax
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index f163517..2f7e356 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -429,48 +429,36 @@ define i32 @header_mask_and_invariant_compare(ptr %A, ptr %B, ptr %C, ptr %D, pt
; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]]
; DEFAULT: [[VECTOR_BODY]]:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE37:.*]] ]
-; DEFAULT-NEXT: [[TMP9:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META8:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT28:%.*]] = insertelement <4 x i32> poison, i32 [[TMP9]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT29:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT28]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP19:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META11:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP19]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP6:%.*]] = or <4 x i32> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT29]]
-; DEFAULT-NEXT: [[TMP7:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META13:![0-9]+]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT30:%.*]] = insertelement <4 x i32> poison, i32 [[TMP7]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT31:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT30]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: [[TMP8:%.*]] = icmp ugt <4 x i32> [[BROADCAST_SPLAT31]], [[TMP6]]
+; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE33:.*]] ]
+; DEFAULT-NEXT: [[TMP3:%.*]] = load i32, ptr [[A]], align 4, !alias.scope [[META8:![0-9]+]]
+; DEFAULT-NEXT: [[TMP4:%.*]] = load i32, ptr [[B]], align 4, !alias.scope [[META11:![0-9]+]]
+; DEFAULT-NEXT: [[TMP5:%.*]] = or i32 [[TMP4]], [[TMP3]]
+; DEFAULT-NEXT: [[TMP6:%.*]] = load i32, ptr [[C]], align 4, !alias.scope [[META13:![0-9]+]]
+; DEFAULT-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], [[TMP5]]
+; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[TMP7]], i64 0
+; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
; DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[D]], i64 [[INDEX]]
-; DEFAULT-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP8]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
; DEFAULT: [[PRED_STORE_IF]]:
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP11]], ptr [[E]], align 4, !alias.scope [[META15:![0-9]+]], !noalias [[META17:![0-9]+]]
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15:![0-9]+]], !noalias [[META17:![0-9]+]]
; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE]]
; DEFAULT: [[PRED_STORE_CONTINUE]]:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP8]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP12]], label %[[PRED_STORE_IF32:.*]], label %[[PRED_STORE_CONTINUE33:.*]]
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF28:.*]], label %[[PRED_STORE_CONTINUE29:.*]]
+; DEFAULT: [[PRED_STORE_IF28]]:
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE29]]
+; DEFAULT: [[PRED_STORE_CONTINUE29]]:
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF30:.*]], label %[[PRED_STORE_CONTINUE31:.*]]
+; DEFAULT: [[PRED_STORE_IF30]]:
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE31]]
+; DEFAULT: [[PRED_STORE_CONTINUE31]]:
+; DEFAULT-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF32:.*]], label %[[PRED_STORE_CONTINUE33]]
; DEFAULT: [[PRED_STORE_IF32]]:
-; DEFAULT-NEXT: [[TMP13:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP13]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
+; DEFAULT-NEXT: store i32 [[TMP5]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE33]]
; DEFAULT: [[PRED_STORE_CONTINUE33]]:
-; DEFAULT-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP8]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF34:.*]], label %[[PRED_STORE_CONTINUE35:.*]]
-; DEFAULT: [[PRED_STORE_IF34]]:
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP15]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE35]]
-; DEFAULT: [[PRED_STORE_CONTINUE35]]:
-; DEFAULT-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP8]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP21]], label %[[PRED_STORE_IF36:.*]], label %[[PRED_STORE_CONTINUE37]]
-; DEFAULT: [[PRED_STORE_IF36]]:
-; DEFAULT-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP22]], ptr [[E]], align 4, !alias.scope [[META15]], !noalias [[META17]]
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE37]]
-; DEFAULT: [[PRED_STORE_CONTINUE37]]:
-; DEFAULT-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[TMP16]], <4 x i1> [[TMP8]]), !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]]
+; DEFAULT-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> zeroinitializer, ptr align 4 [[TMP16]], <4 x i1> [[BROADCAST_SPLAT]]), !alias.scope [[META19:![0-9]+]], !noalias [[META20:![0-9]+]]
; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; DEFAULT-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; DEFAULT-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
@@ -613,63 +601,17 @@ exit:
define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
; COMMON-LABEL: define void @low_trip_count_fold_tail_scalarized_store(
; COMMON-SAME: ptr [[DST:%.*]]) {
-; COMMON-NEXT: [[ENTRY:.*:]]
-; COMMON-NEXT: br label %[[VECTOR_PH:.*]]
-; COMMON: [[VECTOR_PH]]:
-; COMMON-NEXT: br label %[[VECTOR_BODY:.*]]
-; COMMON: [[VECTOR_BODY]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; COMMON: [[PRED_STORE_IF]]:
-; COMMON-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[DST]], i64 0
-; COMMON-NEXT: store i8 0, ptr [[TMP0]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; COMMON: [[PRED_STORE_CONTINUE]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]]
-; COMMON: [[PRED_STORE_IF1]]:
-; COMMON-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[DST]], i64 1
-; COMMON-NEXT: store i8 1, ptr [[TMP1]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE2]]
-; COMMON: [[PRED_STORE_CONTINUE2]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
-; COMMON: [[PRED_STORE_IF3]]:
-; COMMON-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[DST]], i64 2
-; COMMON-NEXT: store i8 2, ptr [[TMP2]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE4]]
-; COMMON: [[PRED_STORE_CONTINUE4]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
-; COMMON: [[PRED_STORE_IF5]]:
-; COMMON-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[DST]], i64 3
-; COMMON-NEXT: store i8 3, ptr [[TMP3]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE6]]
-; COMMON: [[PRED_STORE_CONTINUE6]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
-; COMMON: [[PRED_STORE_IF7]]:
-; COMMON-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[DST]], i64 4
-; COMMON-NEXT: store i8 4, ptr [[TMP4]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE8]]
-; COMMON: [[PRED_STORE_CONTINUE8]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
-; COMMON: [[PRED_STORE_IF9]]:
-; COMMON-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i64 5
-; COMMON-NEXT: store i8 5, ptr [[TMP5]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE10]]
-; COMMON: [[PRED_STORE_CONTINUE10]]:
-; COMMON-NEXT: br i1 true, label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
-; COMMON: [[PRED_STORE_IF11]]:
-; COMMON-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[DST]], i64 6
-; COMMON-NEXT: store i8 6, ptr [[TMP6]], align 1
-; COMMON-NEXT: br label %[[PRED_STORE_CONTINUE12]]
-; COMMON: [[PRED_STORE_CONTINUE12]]:
-; COMMON-NEXT: br i1 false, label %[[PRED_STORE_IF13:.*]], label %[[EXIT:.*]]
-; COMMON: [[PRED_STORE_IF13]]:
-; COMMON-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 7
-; COMMON-NEXT: store i8 7, ptr [[TMP7]], align 1
-; COMMON-NEXT: br label %[[EXIT]]
+; COMMON-NEXT: [[ENTRY:.*]]:
+; COMMON-NEXT: br label %[[LOOP:.*]]
+; COMMON: [[LOOP]]:
+; COMMON-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; COMMON-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i8
+; COMMON-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
+; COMMON-NEXT: store i8 [[IV_TRUNC]], ptr [[GEP]], align 1
+; COMMON-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; COMMON-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 7
+; COMMON-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
; COMMON: [[EXIT]]:
-; COMMON-NEXT: br label %[[SCALAR_PH:.*]]
-; COMMON: [[SCALAR_PH]]:
-; COMMON-NEXT: br label %[[EXIT1:.*]]
-; COMMON: [[EXIT1]]:
; COMMON-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll b/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
index 5970608..bea34e2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/extractvalue-no-scalarization-required.ll
@@ -16,7 +16,7 @@
; CM: vector.ph:
; CM: CLONE ir<%a> = extractvalue ir<%sv>
; CM: CLONE ir<%b> = extractvalue ir<%sv>
-; CM: WIDEN ir<%add> = add ir<%a>, ir<%b>
+; CM: CLONE ir<%add> = add ir<%a>, ir<%b>
; CM: Successor(s): vector loop
; CM: LV: Scalar loop costs: 5.
@@ -30,23 +30,22 @@ define void @test1(ptr %dst, {i64, i64} %sv) {
; FORCED-NEXT: br label %[[VECTOR_PH:.*]]
; FORCED: [[VECTOR_PH]]:
; FORCED-NEXT: [[TMP0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
-; FORCED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP0]], i64 0
-; FORCED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
; FORCED-NEXT: [[TMP4:%.*]] = extractvalue { i64, i64 } [[SV]], 1
-; FORCED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP4]], i64 0
+; FORCED-NEXT: [[TMP5:%.*]] = add i64 [[TMP0]], [[TMP4]]
+; FORCED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP5]], i64 0
; FORCED-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
-; FORCED-NEXT: [[TMP1:%.*]] = add <2 x i64> [[BROADCAST_SPLAT]], [[BROADCAST_SPLAT2]]
; FORCED-NEXT: br label %[[VECTOR_BODY:.*]]
; FORCED: [[VECTOR_BODY]]:
; FORCED-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; FORCED-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
-; FORCED-NEXT: store <2 x i64> [[TMP1]], ptr [[TMP2]], align 4
+; FORCED-NEXT: store <2 x i64> [[BROADCAST_SPLAT2]], ptr [[TMP2]], align 4
; FORCED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; FORCED-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; FORCED: [[MIDDLE_BLOCK]]:
-; FORCED-NEXT: br [[EXIT:label %.*]]
-; FORCED: [[SCALAR_PH:.*:]]
+; FORCED-NEXT: br label %[[EXIT:.*]]
+; FORCED: [[EXIT]]:
+; FORCED-NEXT: ret void
;
entry:
br label %loop.body
@@ -99,10 +98,11 @@ define void @test_getVectorCallCost(ptr %dst, {float, float} %sv) {
; FORCED-NEXT: store <2 x float> [[TMP2]], ptr [[TMP1]], align 4
; FORCED-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
; FORCED-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; FORCED-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; FORCED: [[MIDDLE_BLOCK]]:
-; FORCED-NEXT: br [[EXIT:label %.*]]
-; FORCED: [[SCALAR_PH:.*:]]
+; FORCED-NEXT: br label %[[EXIT:.*]]
+; FORCED: [[EXIT]]:
+; FORCED-NEXT: ret void
;
entry:
br label %loop.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
index cfc6cc8..4b097ba 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/induction-costs-sve.ll
@@ -271,69 +271,11 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 {
;
; PRED-LABEL: define void @iv_trunc(
; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*:]]
+; PRED-NEXT: [[ENTRY:.*]]:
; PRED-NEXT: [[MUL_X:%.*]] = add i32 [[X]], 1
-; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
-; PRED-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
-; PRED: [[VECTOR_SCEVCHECK]]:
-; PRED-NEXT: [[TMP1:%.*]] = sub i32 -1, [[X]]
-; PRED-NEXT: [[TMP2:%.*]] = icmp slt i32 [[MUL_X]], 0
-; PRED-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 [[MUL_X]]
-; PRED-NEXT: [[TMP4:%.*]] = trunc i64 [[N]] to i32
-; PRED-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP3]], i32 [[TMP4]])
-; PRED-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
-; PRED-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
-; PRED-NEXT: [[TMP5:%.*]] = sub i32 0, [[MUL_RESULT]]
-; PRED-NEXT: [[TMP6:%.*]] = icmp ugt i32 [[TMP5]], 0
-; PRED-NEXT: [[TMP7:%.*]] = select i1 [[TMP2]], i1 [[TMP6]], i1 false
-; PRED-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
-; PRED-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[N]], 4294967295
-; PRED-NEXT: [[TMP10:%.*]] = icmp ne i32 [[MUL_X]], 0
-; PRED-NEXT: [[TMP11:%.*]] = and i1 [[TMP9]], [[TMP10]]
-; PRED-NEXT: [[TMP12:%.*]] = or i1 [[TMP8]], [[TMP11]]
-; PRED-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[TMP13:%.*]] = sub i64 [[TMP0]], 2
-; PRED-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[TMP0]], 2
-; PRED-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0
-; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 0, i64 [[TMP0]])
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[MUL_X]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
-; PRED-NEXT: br label %[[VECTOR_BODY:.*]]
-; PRED: [[VECTOR_BODY]]:
-; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE2:.*]] ]
-; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[PRED_STORE_CONTINUE2]] ]
-; PRED-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE2]] ]
-; PRED-NEXT: [[TMP16:%.*]] = mul <2 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
-; PRED-NEXT: [[TMP17:%.*]] = zext <2 x i32> [[TMP16]] to <2 x i64>
-; PRED-NEXT: [[TMP18:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 0
-; PRED-NEXT: br i1 [[TMP18]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; PRED: [[PRED_STORE_IF]]:
-; PRED-NEXT: [[TMP19:%.*]] = extractelement <2 x i64> [[TMP17]], i32 0
-; PRED-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP19]]
-; PRED-NEXT: store i32 1, ptr [[TMP20]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; PRED: [[PRED_STORE_CONTINUE]]:
-; PRED-NEXT: [[TMP21:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK]], i32 1
-; PRED-NEXT: br i1 [[TMP21]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2]]
-; PRED: [[PRED_STORE_IF1]]:
-; PRED-NEXT: [[TMP22:%.*]] = extractelement <2 x i64> [[TMP17]], i32 1
-; PRED-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP22]]
-; PRED-NEXT: store i32 1, ptr [[TMP23]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE2]]
-; PRED: [[PRED_STORE_CONTINUE2]]:
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
-; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i64(i64 [[INDEX]], i64 [[TMP15]])
-; PRED-NEXT: [[TMP24:%.*]] = extractelement <2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
-; PRED-NEXT: [[TMP25:%.*]] = xor i1 [[TMP24]], true
-; PRED-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
-; PRED-NEXT: br i1 [[TMP25]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br label %[[EXIT:.*]]
-; PRED: [[SCALAR_PH]]:
; PRED-NEXT: br label %[[FOR_BODY:.*]]
; PRED: [[FOR_BODY]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; PRED-NEXT: [[TRUNC_IV:%.*]] = trunc i64 [[IV]] to i32
; PRED-NEXT: [[ADD_I:%.*]] = mul i32 [[MUL_X]], [[TRUNC_IV]]
; PRED-NEXT: [[IV_MUL:%.*]] = zext i32 [[ADD_I]] to i64
@@ -341,7 +283,7 @@ define void @iv_trunc(i32 %x, ptr %dst, i64 %N) #0 {
; PRED-NEXT: store i32 1, ptr [[GEP]], align 4
; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N]]
-; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; PRED-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[FOR_BODY]]
; PRED: [[EXIT]]:
; PRED-NEXT: ret void
;
@@ -437,101 +379,21 @@ define void @trunc_ivs_and_store(i32 %x, ptr %dst, i64 %N) #0 {
;
; PRED-LABEL: define void @trunc_ivs_and_store(
; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*:]]
-; PRED-NEXT: [[MUL:%.*]] = mul i32 [[X]], [[X]]
-; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
-; PRED-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
-; PRED: [[VECTOR_SCEVCHECK]]:
+; PRED-NEXT: [[ENTRY:.*]]:
; PRED-NEXT: [[TMP1:%.*]] = mul i32 [[X]], [[X]]
-; PRED-NEXT: [[TMP2:%.*]] = sub i32 0, [[TMP1]]
-; PRED-NEXT: [[TMP3:%.*]] = icmp slt i32 [[MUL]], 0
-; PRED-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 [[MUL]]
-; PRED-NEXT: [[TMP5:%.*]] = trunc i64 [[N]] to i32
-; PRED-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP4]], i32 [[TMP5]])
-; PRED-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
-; PRED-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
-; PRED-NEXT: [[TMP6:%.*]] = sub i32 0, [[MUL_RESULT]]
-; PRED-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[TMP6]], 0
-; PRED-NEXT: [[TMP8:%.*]] = select i1 [[TMP3]], i1 [[TMP7]], i1 false
-; PRED-NEXT: [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
-; PRED-NEXT: [[TMP10:%.*]] = icmp ugt i64 [[N]], 4294967295
-; PRED-NEXT: [[TMP11:%.*]] = icmp ne i32 [[MUL]], 0
-; PRED-NEXT: [[TMP12:%.*]] = and i1 [[TMP10]], [[TMP11]]
-; PRED-NEXT: [[TMP13:%.*]] = or i1 [[TMP9]], [[TMP12]]
-; PRED-NEXT: br i1 [[TMP13]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[TMP14:%.*]] = sub i64 [[TMP0]], 4
-; PRED-NEXT: [[TMP15:%.*]] = icmp ugt i64 [[TMP0]], 4
-; PRED-NEXT: [[TMP16:%.*]] = select i1 [[TMP15]], i64 [[TMP14]], i64 0
-; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[TMP0]])
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[MUL]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: br label %[[VECTOR_BODY:.*]]
-; PRED: [[VECTOR_BODY]]:
-; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE7:.*]] ]
-; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[PRED_STORE_CONTINUE7]] ]
-; PRED-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE7]] ]
-; PRED-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
-; PRED-NEXT: [[TMP17:%.*]] = mul <4 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
-; PRED-NEXT: [[TMP18:%.*]] = zext <4 x i32> [[TMP17]] to <4 x i64>
-; PRED-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 0
-; PRED-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; PRED: [[PRED_STORE_IF]]:
-; PRED-NEXT: [[TMP20:%.*]] = extractelement <4 x i64> [[TMP18]], i32 0
-; PRED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP20]]
-; PRED-NEXT: [[TMP22:%.*]] = add i32 [[OFFSET_IDX]], 0
-; PRED-NEXT: store i32 [[TMP22]], ptr [[TMP21]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; PRED: [[PRED_STORE_CONTINUE]]:
-; PRED-NEXT: [[TMP23:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 1
-; PRED-NEXT: br i1 [[TMP23]], label %[[PRED_STORE_IF2:.*]], label %[[PRED_STORE_CONTINUE3:.*]]
-; PRED: [[PRED_STORE_IF2]]:
-; PRED-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP18]], i32 1
-; PRED-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP24]]
-; PRED-NEXT: [[TMP26:%.*]] = add i32 [[OFFSET_IDX]], 1
-; PRED-NEXT: store i32 [[TMP26]], ptr [[TMP25]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE3]]
-; PRED: [[PRED_STORE_CONTINUE3]]:
-; PRED-NEXT: [[TMP27:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 2
-; PRED-NEXT: br i1 [[TMP27]], label %[[PRED_STORE_IF4:.*]], label %[[PRED_STORE_CONTINUE5:.*]]
-; PRED: [[PRED_STORE_IF4]]:
-; PRED-NEXT: [[TMP28:%.*]] = extractelement <4 x i64> [[TMP18]], i32 2
-; PRED-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP28]]
-; PRED-NEXT: [[TMP30:%.*]] = add i32 [[OFFSET_IDX]], 2
-; PRED-NEXT: store i32 [[TMP30]], ptr [[TMP29]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE5]]
-; PRED: [[PRED_STORE_CONTINUE5]]:
-; PRED-NEXT: [[TMP31:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 3
-; PRED-NEXT: br i1 [[TMP31]], label %[[PRED_STORE_IF6:.*]], label %[[PRED_STORE_CONTINUE7]]
-; PRED: [[PRED_STORE_IF6]]:
-; PRED-NEXT: [[TMP32:%.*]] = extractelement <4 x i64> [[TMP18]], i32 3
-; PRED-NEXT: [[TMP33:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP32]]
-; PRED-NEXT: [[TMP34:%.*]] = add i32 [[OFFSET_IDX]], 3
-; PRED-NEXT: store i32 [[TMP34]], ptr [[TMP33]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE7]]
-; PRED: [[PRED_STORE_CONTINUE7]]:
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 [[TMP16]])
-; PRED-NEXT: [[TMP35:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
-; PRED-NEXT: [[TMP36:%.*]] = xor i1 [[TMP35]], true
-; PRED-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
-; PRED-NEXT: br i1 [[TMP36]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br label %[[EXIT:.*]]
-; PRED: [[SCALAR_PH]]:
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[IV_1_TRUNC:%.*]] = trunc i64 [[IV_1]] to i32
-; PRED-NEXT: [[IV_1_MUL:%.*]] = mul i32 [[MUL]], [[IV_1_TRUNC]]
+; PRED-NEXT: [[IV_1_MUL:%.*]] = mul i32 [[TMP1]], [[IV_1_TRUNC]]
; PRED-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 1
; PRED-NEXT: [[MUL_EXT:%.*]] = zext i32 [[IV_1_MUL]] to i64
; PRED-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[DST]], i64 [[MUL_EXT]]
; PRED-NEXT: store i32 [[IV_2]], ptr [[GEP]], align 4
; PRED-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1
; PRED-NEXT: [[EXITCOND_3_NOT:%.*]] = icmp eq i64 [[IV_1]], [[N]]
-; PRED-NEXT: br i1 [[EXITCOND_3_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; PRED-NEXT: br i1 [[EXITCOND_3_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
; PRED: [[EXIT]]:
; PRED-NEXT: ret void
;
@@ -627,91 +489,12 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 {
;
; PRED-LABEL: define void @ivs_trunc_and_ext(
; PRED-SAME: i32 [[X:%.*]], ptr [[DST:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
-; PRED-NEXT: [[ENTRY:.*:]]
+; PRED-NEXT: [[ENTRY:.*]]:
; PRED-NEXT: [[ADD:%.*]] = add i32 [[X]], 1
-; PRED-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
-; PRED-NEXT: br label %[[VECTOR_SCEVCHECK:.*]]
-; PRED: [[VECTOR_SCEVCHECK]]:
-; PRED-NEXT: [[TMP1:%.*]] = sub i32 -1, [[X]]
-; PRED-NEXT: [[TMP2:%.*]] = icmp slt i32 [[ADD]], 0
-; PRED-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 [[ADD]]
-; PRED-NEXT: [[TMP4:%.*]] = trunc i64 [[N]] to i32
-; PRED-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP3]], i32 [[TMP4]])
-; PRED-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0
-; PRED-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1
-; PRED-NEXT: [[TMP5:%.*]] = sub i32 0, [[MUL_RESULT]]
-; PRED-NEXT: [[TMP6:%.*]] = icmp ugt i32 [[TMP5]], 0
-; PRED-NEXT: [[TMP7:%.*]] = select i1 [[TMP2]], i1 [[TMP6]], i1 false
-; PRED-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]]
-; PRED-NEXT: [[TMP9:%.*]] = icmp ugt i64 [[N]], 4294967295
-; PRED-NEXT: [[TMP10:%.*]] = icmp ne i32 [[ADD]], 0
-; PRED-NEXT: [[TMP11:%.*]] = and i1 [[TMP9]], [[TMP10]]
-; PRED-NEXT: [[TMP12:%.*]] = or i1 [[TMP8]], [[TMP11]]
-; PRED-NEXT: br i1 [[TMP12]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; PRED: [[VECTOR_PH]]:
-; PRED-NEXT: [[TMP13:%.*]] = sub i64 [[TMP0]], 4
-; PRED-NEXT: [[TMP14:%.*]] = icmp ugt i64 [[TMP0]], 4
-; PRED-NEXT: [[TMP15:%.*]] = select i1 [[TMP14]], i64 [[TMP13]], i64 0
-; PRED-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 0, i64 [[TMP0]])
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[ADD]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: br label %[[VECTOR_BODY:.*]]
-; PRED: [[VECTOR_BODY]]:
-; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE6:.*]] ]
-; PRED-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[PRED_STORE_CONTINUE6]] ]
-; PRED-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE6]] ]
-; PRED-NEXT: [[OFFSET_IDX:%.*]] = trunc i64 [[INDEX]] to i32
-; PRED-NEXT: [[TMP16:%.*]] = mul <4 x i32> [[BROADCAST_SPLAT]], [[VEC_IND]]
-; PRED-NEXT: [[TMP17:%.*]] = zext <4 x i32> [[TMP16]] to <4 x i64>
-; PRED-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 0
-; PRED-NEXT: br i1 [[TMP18]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; PRED: [[PRED_STORE_IF]]:
-; PRED-NEXT: [[TMP19:%.*]] = extractelement <4 x i64> [[TMP17]], i32 0
-; PRED-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP19]]
-; PRED-NEXT: [[TMP21:%.*]] = add i32 [[OFFSET_IDX]], 0
-; PRED-NEXT: store i32 [[TMP21]], ptr [[TMP20]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; PRED: [[PRED_STORE_CONTINUE]]:
-; PRED-NEXT: [[TMP22:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 1
-; PRED-NEXT: br i1 [[TMP22]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]]
-; PRED: [[PRED_STORE_IF1]]:
-; PRED-NEXT: [[TMP23:%.*]] = extractelement <4 x i64> [[TMP17]], i32 1
-; PRED-NEXT: [[TMP24:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP23]]
-; PRED-NEXT: [[TMP25:%.*]] = add i32 [[OFFSET_IDX]], 1
-; PRED-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE2]]
-; PRED: [[PRED_STORE_CONTINUE2]]:
-; PRED-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 2
-; PRED-NEXT: br i1 [[TMP26]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
-; PRED: [[PRED_STORE_IF3]]:
-; PRED-NEXT: [[TMP27:%.*]] = extractelement <4 x i64> [[TMP17]], i32 2
-; PRED-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP27]]
-; PRED-NEXT: [[TMP29:%.*]] = add i32 [[OFFSET_IDX]], 2
-; PRED-NEXT: store i32 [[TMP29]], ptr [[TMP28]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE4]]
-; PRED: [[PRED_STORE_CONTINUE4]]:
-; PRED-NEXT: [[TMP30:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK]], i32 3
-; PRED-NEXT: br i1 [[TMP30]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6]]
-; PRED: [[PRED_STORE_IF5]]:
-; PRED-NEXT: [[TMP31:%.*]] = extractelement <4 x i64> [[TMP17]], i32 3
-; PRED-NEXT: [[TMP32:%.*]] = getelementptr i32, ptr [[DST]], i64 [[TMP31]]
-; PRED-NEXT: [[TMP33:%.*]] = add i32 [[OFFSET_IDX]], 3
-; PRED-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4
-; PRED-NEXT: br label %[[PRED_STORE_CONTINUE6]]
-; PRED: [[PRED_STORE_CONTINUE6]]:
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; PRED-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i64(i64 [[INDEX]], i64 [[TMP15]])
-; PRED-NEXT: [[TMP34:%.*]] = extractelement <4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
-; PRED-NEXT: [[TMP35:%.*]] = xor i1 [[TMP34]], true
-; PRED-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
-; PRED-NEXT: br i1 [[TMP35]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; PRED: [[MIDDLE_BLOCK]]:
-; PRED-NEXT: br label %[[EXIT:.*]]
-; PRED: [[SCALAR_PH]]:
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_1:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_1_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV_2:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_2_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV_1]] to i32
; PRED-NEXT: [[IV_MUL:%.*]] = mul i32 [[ADD]], [[IV_TRUNC]]
; PRED-NEXT: [[IV_2_NEXT]] = add i32 [[IV_2]], 1
@@ -720,7 +503,7 @@ define void @ivs_trunc_and_ext(i32 %x, ptr %dst, i64 %N) #0 {
; PRED-NEXT: store i32 [[IV_2]], ptr [[GEP]], align 4
; PRED-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], 1
; PRED-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_1]], [[N]]
-; PRED-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
+; PRED-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
; PRED: [[EXIT]]:
; PRED-NEXT: ret void
;
@@ -842,7 +625,7 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) {
; PRED: [[PRED_STORE_CONTINUE5]]:
; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2
; PRED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; PRED-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; PRED-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; PRED: [[MIDDLE_BLOCK]]:
; PRED-NEXT: br label %[[EXIT:.*]]
; PRED: [[SCALAR_PH]]:
@@ -855,7 +638,7 @@ define void @exit_cond_zext_iv(ptr %dst, i64 %N) {
; PRED-NEXT: [[IV_1_NEXT]] = add i32 [[IV_1]], 1
; PRED-NEXT: [[IV_EXT]] = zext i32 [[IV_1_NEXT]] to i64
; PRED-NEXT: [[C:%.*]] = icmp ult i64 [[IV_EXT]], [[N]]
-; PRED-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP11:![0-9]+]]
+; PRED-NEXT: br i1 [[C]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; PRED: [[EXIT]]:
; PRED-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
index d77ca98..37eac89 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
@@ -1589,8 +1589,7 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP8]], [[TMP9]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP10]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
index 0c6a490..eceda08 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-extractvalue.ll
@@ -17,17 +17,15 @@ define void @widen_extractvalue(ptr %dst, {i64, i64} %sv) #0 {
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 1000, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 1000, [[N_MOD_VF]]
; CHECK-NEXT: [[EXTRACT0:%.*]] = extractvalue { i64, i64 } [[SV]], 0
-; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[EXTRACT0]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP10:%.*]] = extractvalue { i64, i64 } [[SV]], 1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP10]], i64 0
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[EXTRACT0]], [[TMP10]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP6]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP7:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT2]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[DST]], i32 [[INDEX]]
-; CHECK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
+; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT2]], ptr [[TMP8]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
index 6ea075f..83be070 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
@@ -181,178 +181,23 @@ for.cond.cleanup:
define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n) {
; DEFAULT-LABEL: define void @tail_predicate_without_optsize(
; DEFAULT-SAME: ptr [[P:%.*]], i8 [[A:%.*]], i8 [[B:%.*]], i8 [[C:%.*]], i32 [[N:%.*]]) {
-; DEFAULT-NEXT: [[ENTRY:.*:]]
-; DEFAULT-NEXT: br label %[[VECTOR_PH:.*]]
-; DEFAULT: [[VECTOR_PH]]:
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <16 x i8> poison, i8 [[B]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT3]], <16 x i8> poison, <16 x i32> zeroinitializer
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT5]], <16 x i8> poison, <16 x i32> zeroinitializer
-; DEFAULT-NEXT: br label %[[VECTOR_BODY:.*]]
-; DEFAULT: [[VECTOR_BODY]]:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE35:.*]] ]
-; DEFAULT-NEXT: [[VEC_IND:%.*]] = phi <16 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE35]] ]
-; DEFAULT-NEXT: [[VEC_IND1:%.*]] = phi <16 x i8> [ <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], %[[PRED_STORE_CONTINUE35]] ]
-; DEFAULT-NEXT: [[TMP0:%.*]] = icmp ule <16 x i8> [[VEC_IND]], splat (i8 14)
-; DEFAULT-NEXT: [[TMP1:%.*]] = mul <16 x i8> [[BROADCAST_SPLAT]], [[VEC_IND1]]
-; DEFAULT-NEXT: [[TMP2:%.*]] = lshr <16 x i8> [[VEC_IND1]], splat (i8 1)
-; DEFAULT-NEXT: [[TMP3:%.*]] = mul <16 x i8> [[TMP2]], [[BROADCAST_SPLAT4]]
-; DEFAULT-NEXT: [[TMP4:%.*]] = add <16 x i8> [[TMP3]], [[TMP1]]
-; DEFAULT-NEXT: [[TMP5:%.*]] = lshr <16 x i8> [[VEC_IND1]], splat (i8 2)
-; DEFAULT-NEXT: [[TMP6:%.*]] = mul <16 x i8> [[TMP5]], [[BROADCAST_SPLAT6]]
-; DEFAULT-NEXT: [[TMP7:%.*]] = add <16 x i8> [[TMP4]], [[TMP6]]
-; DEFAULT-NEXT: [[TMP8:%.*]] = extractelement <16 x i1> [[TMP0]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
-; DEFAULT: [[PRED_STORE_IF]]:
-; DEFAULT-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0
-; DEFAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP9]]
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <16 x i8> [[TMP7]], i32 0
-; DEFAULT-NEXT: store i8 [[TMP11]], ptr [[TMP10]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE]]
-; DEFAULT: [[PRED_STORE_CONTINUE]]:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <16 x i1> [[TMP0]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP12]], label %[[PRED_STORE_IF6:.*]], label %[[PRED_STORE_CONTINUE7:.*]]
-; DEFAULT: [[PRED_STORE_IF6]]:
-; DEFAULT-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 1
-; DEFAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP13]]
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <16 x i8> [[TMP7]], i32 1
-; DEFAULT-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE7]]
-; DEFAULT: [[PRED_STORE_CONTINUE7]]:
-; DEFAULT-NEXT: [[TMP16:%.*]] = extractelement <16 x i1> [[TMP0]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF8:.*]], label %[[PRED_STORE_CONTINUE9:.*]]
-; DEFAULT: [[PRED_STORE_IF8]]:
-; DEFAULT-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 2
-; DEFAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP17]]
-; DEFAULT-NEXT: [[TMP19:%.*]] = extractelement <16 x i8> [[TMP7]], i32 2
-; DEFAULT-NEXT: store i8 [[TMP19]], ptr [[TMP18]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE9]]
-; DEFAULT: [[PRED_STORE_CONTINUE9]]:
-; DEFAULT-NEXT: [[TMP20:%.*]] = extractelement <16 x i1> [[TMP0]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF10:.*]], label %[[PRED_STORE_CONTINUE11:.*]]
-; DEFAULT: [[PRED_STORE_IF10]]:
-; DEFAULT-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 3
-; DEFAULT-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP21]]
-; DEFAULT-NEXT: [[TMP23:%.*]] = extractelement <16 x i8> [[TMP7]], i32 3
-; DEFAULT-NEXT: store i8 [[TMP23]], ptr [[TMP22]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE11]]
-; DEFAULT: [[PRED_STORE_CONTINUE11]]:
-; DEFAULT-NEXT: [[TMP24:%.*]] = extractelement <16 x i1> [[TMP0]], i32 4
-; DEFAULT-NEXT: br i1 [[TMP24]], label %[[PRED_STORE_IF12:.*]], label %[[PRED_STORE_CONTINUE13:.*]]
-; DEFAULT: [[PRED_STORE_IF12]]:
-; DEFAULT-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], 4
-; DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP25]]
-; DEFAULT-NEXT: [[TMP27:%.*]] = extractelement <16 x i8> [[TMP7]], i32 4
-; DEFAULT-NEXT: store i8 [[TMP27]], ptr [[TMP26]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE13]]
-; DEFAULT: [[PRED_STORE_CONTINUE13]]:
-; DEFAULT-NEXT: [[TMP28:%.*]] = extractelement <16 x i1> [[TMP0]], i32 5
-; DEFAULT-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF14:.*]], label %[[PRED_STORE_CONTINUE15:.*]]
-; DEFAULT: [[PRED_STORE_IF14]]:
-; DEFAULT-NEXT: [[TMP29:%.*]] = add i64 [[INDEX]], 5
-; DEFAULT-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP29]]
-; DEFAULT-NEXT: [[TMP31:%.*]] = extractelement <16 x i8> [[TMP7]], i32 5
-; DEFAULT-NEXT: store i8 [[TMP31]], ptr [[TMP30]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE15]]
-; DEFAULT: [[PRED_STORE_CONTINUE15]]:
-; DEFAULT-NEXT: [[TMP32:%.*]] = extractelement <16 x i1> [[TMP0]], i32 6
-; DEFAULT-NEXT: br i1 [[TMP32]], label %[[PRED_STORE_IF16:.*]], label %[[PRED_STORE_CONTINUE17:.*]]
-; DEFAULT: [[PRED_STORE_IF16]]:
-; DEFAULT-NEXT: [[TMP33:%.*]] = add i64 [[INDEX]], 6
-; DEFAULT-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP33]]
-; DEFAULT-NEXT: [[TMP35:%.*]] = extractelement <16 x i8> [[TMP7]], i32 6
-; DEFAULT-NEXT: store i8 [[TMP35]], ptr [[TMP34]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE17]]
-; DEFAULT: [[PRED_STORE_CONTINUE17]]:
-; DEFAULT-NEXT: [[TMP36:%.*]] = extractelement <16 x i1> [[TMP0]], i32 7
-; DEFAULT-NEXT: br i1 [[TMP36]], label %[[PRED_STORE_IF18:.*]], label %[[PRED_STORE_CONTINUE19:.*]]
-; DEFAULT: [[PRED_STORE_IF18]]:
-; DEFAULT-NEXT: [[TMP37:%.*]] = add i64 [[INDEX]], 7
-; DEFAULT-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP37]]
-; DEFAULT-NEXT: [[TMP39:%.*]] = extractelement <16 x i8> [[TMP7]], i32 7
-; DEFAULT-NEXT: store i8 [[TMP39]], ptr [[TMP38]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE19]]
-; DEFAULT: [[PRED_STORE_CONTINUE19]]:
-; DEFAULT-NEXT: [[TMP40:%.*]] = extractelement <16 x i1> [[TMP0]], i32 8
-; DEFAULT-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF20:.*]], label %[[PRED_STORE_CONTINUE21:.*]]
-; DEFAULT: [[PRED_STORE_IF20]]:
-; DEFAULT-NEXT: [[TMP41:%.*]] = add i64 [[INDEX]], 8
-; DEFAULT-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP41]]
-; DEFAULT-NEXT: [[TMP43:%.*]] = extractelement <16 x i8> [[TMP7]], i32 8
-; DEFAULT-NEXT: store i8 [[TMP43]], ptr [[TMP42]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE21]]
-; DEFAULT: [[PRED_STORE_CONTINUE21]]:
-; DEFAULT-NEXT: [[TMP44:%.*]] = extractelement <16 x i1> [[TMP0]], i32 9
-; DEFAULT-NEXT: br i1 [[TMP44]], label %[[PRED_STORE_IF22:.*]], label %[[PRED_STORE_CONTINUE23:.*]]
-; DEFAULT: [[PRED_STORE_IF22]]:
-; DEFAULT-NEXT: [[TMP45:%.*]] = add i64 [[INDEX]], 9
-; DEFAULT-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP45]]
-; DEFAULT-NEXT: [[TMP47:%.*]] = extractelement <16 x i8> [[TMP7]], i32 9
-; DEFAULT-NEXT: store i8 [[TMP47]], ptr [[TMP46]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE23]]
-; DEFAULT: [[PRED_STORE_CONTINUE23]]:
-; DEFAULT-NEXT: [[TMP48:%.*]] = extractelement <16 x i1> [[TMP0]], i32 10
-; DEFAULT-NEXT: br i1 [[TMP48]], label %[[PRED_STORE_IF24:.*]], label %[[PRED_STORE_CONTINUE25:.*]]
-; DEFAULT: [[PRED_STORE_IF24]]:
-; DEFAULT-NEXT: [[TMP49:%.*]] = add i64 [[INDEX]], 10
-; DEFAULT-NEXT: [[TMP50:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP49]]
-; DEFAULT-NEXT: [[TMP51:%.*]] = extractelement <16 x i8> [[TMP7]], i32 10
-; DEFAULT-NEXT: store i8 [[TMP51]], ptr [[TMP50]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE25]]
-; DEFAULT: [[PRED_STORE_CONTINUE25]]:
-; DEFAULT-NEXT: [[TMP52:%.*]] = extractelement <16 x i1> [[TMP0]], i32 11
-; DEFAULT-NEXT: br i1 [[TMP52]], label %[[PRED_STORE_IF26:.*]], label %[[PRED_STORE_CONTINUE27:.*]]
-; DEFAULT: [[PRED_STORE_IF26]]:
-; DEFAULT-NEXT: [[TMP53:%.*]] = add i64 [[INDEX]], 11
-; DEFAULT-NEXT: [[TMP54:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP53]]
-; DEFAULT-NEXT: [[TMP55:%.*]] = extractelement <16 x i8> [[TMP7]], i32 11
-; DEFAULT-NEXT: store i8 [[TMP55]], ptr [[TMP54]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE27]]
-; DEFAULT: [[PRED_STORE_CONTINUE27]]:
-; DEFAULT-NEXT: [[TMP56:%.*]] = extractelement <16 x i1> [[TMP0]], i32 12
-; DEFAULT-NEXT: br i1 [[TMP56]], label %[[PRED_STORE_IF28:.*]], label %[[PRED_STORE_CONTINUE29:.*]]
-; DEFAULT: [[PRED_STORE_IF28]]:
-; DEFAULT-NEXT: [[TMP57:%.*]] = add i64 [[INDEX]], 12
-; DEFAULT-NEXT: [[TMP58:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP57]]
-; DEFAULT-NEXT: [[TMP59:%.*]] = extractelement <16 x i8> [[TMP7]], i32 12
-; DEFAULT-NEXT: store i8 [[TMP59]], ptr [[TMP58]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE29]]
-; DEFAULT: [[PRED_STORE_CONTINUE29]]:
-; DEFAULT-NEXT: [[TMP60:%.*]] = extractelement <16 x i1> [[TMP0]], i32 13
-; DEFAULT-NEXT: br i1 [[TMP60]], label %[[PRED_STORE_IF30:.*]], label %[[PRED_STORE_CONTINUE31:.*]]
-; DEFAULT: [[PRED_STORE_IF30]]:
-; DEFAULT-NEXT: [[TMP61:%.*]] = add i64 [[INDEX]], 13
-; DEFAULT-NEXT: [[TMP62:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP61]]
-; DEFAULT-NEXT: [[TMP63:%.*]] = extractelement <16 x i8> [[TMP7]], i32 13
-; DEFAULT-NEXT: store i8 [[TMP63]], ptr [[TMP62]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE31]]
-; DEFAULT: [[PRED_STORE_CONTINUE31]]:
-; DEFAULT-NEXT: [[TMP64:%.*]] = extractelement <16 x i1> [[TMP0]], i32 14
-; DEFAULT-NEXT: br i1 [[TMP64]], label %[[PRED_STORE_IF32:.*]], label %[[PRED_STORE_CONTINUE33:.*]]
-; DEFAULT: [[PRED_STORE_IF32]]:
-; DEFAULT-NEXT: [[TMP65:%.*]] = add i64 [[INDEX]], 14
-; DEFAULT-NEXT: [[TMP66:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP65]]
-; DEFAULT-NEXT: [[TMP67:%.*]] = extractelement <16 x i8> [[TMP7]], i32 14
-; DEFAULT-NEXT: store i8 [[TMP67]], ptr [[TMP66]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE33]]
-; DEFAULT: [[PRED_STORE_CONTINUE33]]:
-; DEFAULT-NEXT: [[TMP68:%.*]] = extractelement <16 x i1> [[TMP0]], i32 15
-; DEFAULT-NEXT: br i1 [[TMP68]], label %[[PRED_STORE_IF34:.*]], label %[[PRED_STORE_CONTINUE35]]
-; DEFAULT: [[PRED_STORE_IF34]]:
-; DEFAULT-NEXT: [[TMP69:%.*]] = add i64 [[INDEX]], 15
+; DEFAULT-NEXT: [[ENTRY:.*]]:
+; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
+; DEFAULT: [[FOR_BODY]]:
+; DEFAULT-NEXT: [[TMP69:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; DEFAULT-NEXT: [[TMP0:%.*]] = trunc nuw nsw i64 [[TMP69]] to i8
+; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP0]]
+; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP0]], 1
+; DEFAULT-NEXT: [[MUL5:%.*]] = mul i8 [[SHR]], [[B]]
+; DEFAULT-NEXT: [[ADD:%.*]] = add i8 [[MUL5]], [[MUL]]
+; DEFAULT-NEXT: [[SHR7:%.*]] = lshr i8 [[TMP0]], 2
+; DEFAULT-NEXT: [[MUL9:%.*]] = mul i8 [[SHR7]], [[C]]
+; DEFAULT-NEXT: [[TMP71:%.*]] = add i8 [[ADD]], [[MUL9]]
; DEFAULT-NEXT: [[TMP70:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[TMP69]]
-; DEFAULT-NEXT: [[TMP71:%.*]] = extractelement <16 x i8> [[TMP7]], i32 15
; DEFAULT-NEXT: store i8 [[TMP71]], ptr [[TMP70]], align 1
-; DEFAULT-NEXT: br label %[[PRED_STORE_CONTINUE35]]
-; DEFAULT: [[PRED_STORE_CONTINUE35]]:
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; DEFAULT-NEXT: [[VEC_IND_NEXT]] = add <16 x i8> [[VEC_IND]], splat (i8 16)
-; DEFAULT-NEXT: [[VEC_IND_NEXT2]] = add <16 x i8> [[VEC_IND1]], splat (i8 16)
-; DEFAULT-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; DEFAULT: [[MIDDLE_BLOCK]]:
-; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
+; DEFAULT-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[TMP69]], 1
+; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 15
+; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
; DEFAULT-NEXT: ret void
;
@@ -449,7 +294,7 @@ define void @dont_vectorize_with_minsize() {
; DEFAULT-NEXT: store <4 x i16> [[TMP11]], ptr [[TMP9]], align 2
; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4
; DEFAULT-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
-; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
@@ -555,7 +400,7 @@ define void @vectorization_forced() {
; DEFAULT-NEXT: store <4 x i16> [[TMP11]], ptr [[TMP9]], align 2
; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4
; DEFAULT-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
-; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; DEFAULT: [[MIDDLE_BLOCK]]:
; DEFAULT-NEXT: br label %[[FOR_COND_CLEANUP:.*]]
; DEFAULT: [[FOR_COND_CLEANUP]]:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index f25b86d..b81637f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -293,9 +293,9 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[A]], i64 0
+; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[A]], -1
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP19:%.*]] = xor <vscale x 4 x i32> [[BROADCAST_SPLAT]], splat (i32 -1)
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 9)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP7]]
@@ -309,7 +309,7 @@ define void @test_phi_in_latch_redundant(ptr %dst, i32 %a) {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP9]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[DST]], <vscale x 4 x i64> [[VEC_IND]]
-; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP19]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
+; CHECK-NEXT: call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[BROADCAST_SPLAT]], <vscale x 4 x ptr> align 4 [[TMP16]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP8]])
; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP5]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
index 8ef53ca..345f6f6 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
@@ -295,8 +295,7 @@ define i8 @mul_non_pow_2_low_trip_count(ptr noalias %a) {
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[TMP1]] = mul <8 x i8> [[WIDE_LOAD]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 8
-; CHECK-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP3:%.*]] = call i8 @llvm.vector.reduce.mul.v8i8(<8 x i8> [[TMP1]])
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll b/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll
index b26e9cf..718e03c 100644
--- a/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll
@@ -1231,7 +1231,7 @@ define hidden void @scale_uv_row_down2(ptr nocapture noundef readonly %0, i32 no
; CHECK: LV: Found an estimated cost of 26 for VF 8 For instruction: %14 = load i8
; CHECK: LV: Found an estimated cost of 26 for VF 8 For instruction: %20 = load i8
; CHECK: LV: Found an estimated cost of 7 for VF 8 For instruction: store i8 %48
-; CHECK: LV: Vector loop of width 8 costs: 10.
+; CHECK: LV: Vector loop of width 8 costs: 11.
; CHECK: LV: Found an estimated cost of 132 for VF 16 For instruction: %14 = load i8
; CHECK: LV: Found an estimated cost of 132 for VF 16 For instruction: %20 = load i8
; CHECK: LV: Found an estimated cost of 6 for VF 16 For instruction: store i8 %48
@@ -1442,8 +1442,8 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 11 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: LV: Scalar loop costs: 18
-; CHECK: LV: Vector loop of width 2 costs: 23
-; CHECK: LV: Vector loop of width 4 costs: 13
+; CHECK: LV: Vector loop of width 2 costs: 27
+; CHECK: LV: Vector loop of width 4 costs: 15
; CHECK: LV: Selecting VF: 4.
define hidden void @two_bytes_two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
@@ -1484,8 +1484,8 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 11 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: LV: Scalar loop costs: 18
-; CHECK: LV: Vector loop of width 2 costs: 23
-; CHECK: LV: Vector loop of width 4 costs: 13
+; CHECK: LV: Vector loop of width 2 costs: 27
+; CHECK: LV: Vector loop of width 4 costs: 15
; CHECK: LV: Selecting VF: 4.
define hidden void @two_bytes_two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
@@ -1526,9 +1526,9 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: Cost of 11 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: LV: Scalar loop costs: 16
-; CHECK: LV: Vector loop of width 2 costs: 21
-; CHECK: LV: Vector loop of width 4 costs: 14.
-; CHECK: LV: Selecting VF: 4.
+; CHECK: LV: Vector loop of width 2 costs: 26
+; CHECK: LV: Vector loop of width 4 costs: 16.
+; CHECK: LV: Selecting VF: 1.
define hidden void @two_floats_two_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp22.not = icmp eq i32 %N, 0
@@ -1566,9 +1566,9 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: LV: Scalar loop costs: 16
-; CHECK: LV: Vector loop of width 2 costs: 21
-; CHECK: LV: Vector loop of width 4 costs: 14.
-; CHECK: LV: Selecting VF: 4.
+; CHECK: LV: Vector loop of width 2 costs: 26
+; CHECK: LV: Vector loop of width 4 costs: 16.
+; CHECK: LV: Selecting VF: 1.
define hidden void @two_floats_two_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp21.not = icmp eq i32 %N, 0
@@ -1608,8 +1608,8 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: LV: Scalar loop costs: 18
-; CHECK: LV: Vector loop of width 2 costs: 22
-; CHECK: LV: Vector loop of width 4 costs: 11.
+; CHECK: LV: Vector loop of width 2 costs: 24
+; CHECK: LV: Vector loop of width 4 costs: 12
; CHECK: LV: Selecting VF: 4.
define hidden void @two_shorts_two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
@@ -1652,8 +1652,8 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: LV: Scalar loop costs: 18
-; CHECK: LV: Vector loop of width 2 costs: 22
-; CHECK: LV: Vector loop of width 4 costs: 11.
+; CHECK: LV: Vector loop of width 2 costs: 24
+; CHECK: LV: Vector loop of width 4 costs: 12
; CHECK: LV: Selecting VF: 4.
define hidden void @two_shorts_two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
@@ -1696,9 +1696,9 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: LV: Scalar loop costs: 16
-; CHECK: LV: Vector loop of width 2 costs: 20
-; CHECK: LV: Vector loop of width 4 costs: 13.
-; CHECK: LV: Selecting VF: 4.
+; CHECK: LV: Vector loop of width 2 costs: 23
+; CHECK: LV: Vector loop of width 4 costs: 14
+; CHECK: LV: Selecting VF: 4
define hidden void @two_floats_two_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp22.not = icmp eq i32 %N, 0
@@ -1738,9 +1738,9 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: Cost of 7 for VF 4: INTERLEAVE-GROUP with factor 2
; CHECK: LV: Scalar loop costs: 16
-; CHECK: LV: Vector loop of width 2 costs: 20
-; CHECK: LV: Vector loop of width 4 costs: 13.
-; CHECK: LV: Selecting VF: 4.
+; CHECK: LV: Vector loop of width 2 costs: 23
+; CHECK: LV: Vector loop of width 4 costs: 14
+; CHECK: LV: Selecting VF: 4
define hidden void @two_floats_two_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp21.not = icmp eq i32 %N, 0
@@ -1883,8 +1883,8 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: LV: Scalar loop costs: 32
-; CHECK: LV: Vector loop of width 2 costs: 43
-; CHECK: LV: Vector loop of width 4 costs: 23
+; CHECK: LV: Vector loop of width 2 costs: 51
+; CHECK: LV: Vector loop of width 4 costs: 27
; CHECK: LV: Selecting VF: 4
define hidden void @four_bytes_four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
@@ -1943,8 +1943,8 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: LV: Scalar loop costs: 32
-; CHECK: LV: Vector loop of width 2 costs: 43
-; CHECK: LV: Vector loop of width 4 costs: 23
+; CHECK: LV: Vector loop of width 2 costs: 51
+; CHECK: LV: Vector loop of width 4 costs: 27
; CHECK: LV: Selecting VF: 4
define hidden void @four_bytes_four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
@@ -2004,9 +2004,9 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: LV: Scalar loop costs: 28
-; CHECK: LV: Vector loop of width 2 costs: 38
-; CHECK: LV: Vector loop of width 4 costs: 26
-; CHECK: LV: Selecting VF: 4
+; CHECK: LV: Vector loop of width 2 costs: 48
+; CHECK: LV: Vector loop of width 4 costs: 31
+; CHECK: LV: Selecting VF: 1
define hidden void @four_floats_four_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp48.not = icmp eq i32 %N, 0
@@ -2061,9 +2061,9 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: LV: Scalar loop costs: 28
-; CHECK: LV: Vector loop of width 2 costs: 38
-; CHECK: LV: Vector loop of width 4 costs: 26
-; CHECK: LV: Selecting VF: 4
+; CHECK: LV: Vector loop of width 2 costs: 48
+; CHECK: LV: Vector loop of width 4 costs: 31
+; CHECK: LV: Selecting VF: 1
define hidden void @four_floats_four_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp45.not = icmp eq i32 %N, 0
@@ -2119,8 +2119,8 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: LV: Scalar loop costs: 32
-; CHECK: LV: Vector loop of width 2 costs: 37
-; CHECK: LV: Vector loop of width 4 costs: 23
+; CHECK: LV: Vector loop of width 2 costs: 41
+; CHECK: LV: Vector loop of width 4 costs: 25
; CHECK: LV: Selecting VF: 4
define hidden void @four_shorts_four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
@@ -2181,8 +2181,8 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: LV: Scalar loop costs: 32
-; CHECK: LV: Vector loop of width 2 costs: 37
-; CHECK: LV: Vector loop of width 4 costs: 23
+; CHECK: LV: Vector loop of width 2 costs: 41
+; CHECK: LV: Vector loop of width 4 costs: 25
; CHECK: LV: Selecting VF: 4
define hidden void @four_shorts_four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
@@ -2243,9 +2243,9 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: LV: Scalar loop costs: 28
-; CHECK: LV: Vector loop of width 2 costs: 35
-; CHECK: LV: Vector loop of width 4 costs: 26
-; CHECK: LV: Selecting VF: 4
+; CHECK: LV: Vector loop of width 2 costs: 41
+; CHECK: LV: Vector loop of width 4 costs: 29
+; CHECK: LV: Selecting VF: 1
define hidden void @four_floats_four_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp48.not = icmp eq i32 %N, 0
@@ -2301,9 +2301,9 @@ for.body: ; preds = %entry, %for.body
; CHECK: Cost of 36 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: Cost of 18 for VF 4: INTERLEAVE-GROUP with factor 4
; CHECK: LV: Scalar loop costs: 28
-; CHECK: LV: Vector loop of width 2 costs: 35
-; CHECK: LV: Vector loop of width 4 costs: 26
-; CHECK: LV: Selecting VF: 4
+; CHECK: LV: Vector loop of width 2 costs: 41
+; CHECK: LV: Vector loop of width 4 costs: 29
+; CHECK: LV: Selecting VF: 1
define hidden void @four_floats_four_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp45.not = icmp eq i32 %N, 0
diff --git a/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll b/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll
index a286df9..c2c04ce 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/CostModel/masked-interleaved-store-i16.ll
@@ -85,13 +85,13 @@ define void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr noalias no
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 5 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 8 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 2 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 10 for VF 4 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 17 for VF 4 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 4 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 21 for VF 8 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 35 for VF 8 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 8 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 43 for VF 16 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 71 for VF 16 For instruction: store i16 %0, ptr %arrayidx2, align 2
; DISABLED_MASKED_STRIDED: LV: Found an estimated cost of 3000000 for VF 16 For instruction: store i16 %2, ptr %arrayidx7, align 2
;
; ENABLED_MASKED_STRIDED-LABEL: 'test2'
@@ -99,8 +99,8 @@ define void @test2(ptr noalias nocapture %points, i32 %numPoints, ptr noalias no
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %0, ptr %arrayidx2, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 1 for VF 1 For instruction: store i16 %2, ptr %arrayidx7, align 2
-; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 5 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2
-; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 5 for VF 2 For instruction: store i16 %2, ptr %arrayidx7, align 2
+; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 0 for VF 2 For instruction: store i16 %0, ptr %arrayidx2, align 2
+; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 13 for VF 2 For instruction: store i16 %2, ptr %arrayidx7, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 0 for VF 4 For instruction: store i16 %0, ptr %arrayidx2, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 14 for VF 4 For instruction: store i16 %2, ptr %arrayidx7, align 2
; ENABLED_MASKED_STRIDED: LV: Found an estimated cost of 0 for VF 8 For instruction: store i16 %0, ptr %arrayidx2, align 2
diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
index cc84fab..002d811 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
@@ -435,67 +435,16 @@ define void @test_first_order_recurrence_tried_to_scalarized(ptr %dst, i1 %c, i3
; CHECK-LABEL: @test_first_order_recurrence_tried_to_scalarized(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[N:%.*]] = select i1 [[C:%.*]], i32 8, i32 9
-; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
-; CHECK: vector.ph:
-; CHECK-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], 3
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], 4
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]]
-; CHECK-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[N]], 1
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_STORE_CONTINUE6]] ]
-; CHECK-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i32> [ <i32 poison, i32 poison, i32 poison, i32 4>, [[VECTOR_PH]] ], [ [[VEC_IND]], [[PRED_STORE_CONTINUE6]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x i32> [[VECTOR_RECUR]], <4 x i32> [[VEC_IND]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
-; CHECK-NEXT: [[TMP1:%.*]] = icmp ule <4 x i32> [[VEC_IND]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i32, ptr [[DST:%.*]], i32 [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = sub nsw i32 10, [[TMP5]]
-; CHECK-NEXT: store i32 [[TMP6]], ptr [[TMP4]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
-; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i1> [[TMP1]], i32 1
-; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
-; CHECK: pred.store.if1:
-; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 1
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[TMP8]]
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
-; CHECK-NEXT: [[TMP11:%.*]] = sub nsw i32 10, [[TMP10]]
-; CHECK-NEXT: store i32 [[TMP11]], ptr [[TMP9]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
-; CHECK: pred.store.continue2:
-; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP1]], i32 2
-; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
-; CHECK: pred.store.if3:
-; CHECK-NEXT: [[TMP13:%.*]] = add i32 [[INDEX]], 2
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[TMP13]]
-; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
+; CHECK: loop:
+; CHECK-NEXT: [[TMP18:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP15:%.*]] = phi i32 [ 4, [[ENTRY]] ], [ [[TMP18]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[TMP18]], 1
; CHECK-NEXT: [[TMP16:%.*]] = sub nsw i32 10, [[TMP15]]
-; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP14]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
-; CHECK: pred.store.continue4:
-; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i1> [[TMP1]], i32 3
-; CHECK-NEXT: br i1 [[TMP17]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
-; CHECK: pred.store.if5:
-; CHECK-NEXT: [[TMP18:%.*]] = add i32 [[INDEX]], 3
-; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[TMP18]]
-; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
-; CHECK-NEXT: [[TMP21:%.*]] = sub nsw i32 10, [[TMP20]]
-; CHECK-NEXT: store i32 [[TMP21]], ptr [[TMP19]], align 4
-; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
-; CHECK: pred.store.continue6:
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i32> [[VEC_IND]], splat (i32 4)
-; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
-; CHECK: middle.block:
-; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw i32, ptr [[DST:%.*]], i32 [[TMP18]]
+; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP19]], align 4
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT:%.*]], label [[VECTOR_BODY]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll
index c1272e5..6e3b2a5 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll
@@ -12,27 +12,22 @@ define void @test_tc_17_no_epilogue_vectorization(ptr noalias %src, ptr noalias
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 64
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[INDEX]]
-; CHECK-NEXT: store <16 x i8> [[WIDE_LOAD]], ptr [[TMP3]], align 64
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1:%.*]], align 64
+; CHECK-NEXT: store <16 x i8> [[WIDE_LOAD]], ptr [[TMP3:%.*]], align 64
+; CHECK-NEXT: br label [[MIDDLE_BLOCK:%.*]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ 16, [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[LDADDR:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[I]]
+; CHECK-NEXT: [[LDADDR:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i64 [[I]]
; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[LDADDR]], align 64
-; CHECK-NEXT: [[STADDR:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[I]]
+; CHECK-NEXT: [[STADDR:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 [[I]]
; CHECK-NEXT: store i8 [[VAL]], ptr [[STADDR]], align 64
; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1
; CHECK-NEXT: [[IS_NEXT:%.*]] = icmp ult i64 [[I_NEXT]], 17
-; CHECK-NEXT: br i1 [[IS_NEXT]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: br i1 [[IS_NEXT]], label [[LOOP]], label [[EXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -69,11 +64,11 @@ define void @test_tc_18(ptr noalias %src, ptr noalias %dst) {
; CHECK-NEXT: store <16 x i8> [[WIDE_LOAD]], ptr [[TMP3]], align 64
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF4:![0-9]+]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 16, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
@@ -140,7 +135,7 @@ define void @test_tc_19(ptr noalias %src, ptr noalias %dst) {
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF4]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 16, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
@@ -219,7 +214,7 @@ define void @test_tc_20(ptr noalias %src, ptr noalias %dst) {
; CHECK: middle.block:
; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; CHECK: vec.epilog.iter.check:
-; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF11:![0-9]+]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 16, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]]
@@ -231,7 +226,7 @@ define void @test_tc_20(ptr noalias %src, ptr noalias %dst) {
; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD5]], ptr [[TMP15]], align 64
; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], 4
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT6]], 20
-; CHECK-NEXT: br i1 [[TMP17]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP17]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]]
; CHECK: vec.epilog.scalar.ph:
@@ -245,7 +240,7 @@ define void @test_tc_20(ptr noalias %src, ptr noalias %dst) {
; CHECK-NEXT: store i8 [[VAL]], ptr [[STADDR]], align 64
; CHECK-NEXT: [[I_NEXT]] = add i64 [[I]], 1
; CHECK-NEXT: [[IS_NEXT:%.*]] = icmp ult i64 [[I_NEXT]], 20
-; CHECK-NEXT: br i1 [[IS_NEXT]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[IS_NEXT]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
@@ -281,7 +276,7 @@ define void @limit_main_loop_vf_to_avoid_dead_main_vector_loop(ptr noalias %src,
; CHECK-NEXT: store <8 x i8> [[STRIDED_VEC]], ptr [[TMP3]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -294,7 +289,7 @@ define void @limit_main_loop_vf_to_avoid_dead_main_vector_loop(ptr noalias %src,
; CHECK-NEXT: store i8 [[L]], ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 32
-; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
index 8771dc9..6605338 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll
@@ -2581,8 +2581,7 @@ define i32 @test_non_unit_stride_five(i64 %len, ptr %test_base) {
; CHECK-NEXT: [[TMP114]] = add <4 x i32> [[VEC_PHI2]], [[PREDPHI5]]
; CHECK-NEXT: [[TMP115]] = add <4 x i32> [[VEC_PHI3]], [[PREDPHI6]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-NEXT: [[TMP116:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; CHECK-NEXT: br i1 [[TMP116]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: br i1 true, label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP113]], [[TMP112]]
; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP114]], [[BIN_RDX]]
diff --git a/llvm/test/Transforms/LoopVectorize/debugloc.ll b/llvm/test/Transforms/LoopVectorize/debugloc.ll
index 40cd6b6..03e0853 100644
--- a/llvm/test/Transforms/LoopVectorize/debugloc.ll
+++ b/llvm/test/Transforms/LoopVectorize/debugloc.ll
@@ -253,10 +253,10 @@ declare void @llvm.dbg.value(metadata, metadata, metadata)
!32 = distinct !DILexicalBlock(scope: !31, file: !5, line: 137, column: 2)
!33 = !DILocation(line: 210, column: 44, scope: !32)
!34 = !DILocation(line: 320, column: 44, scope: !32)
-!35 = distinct !DISubprogram(name: "test_misc", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 3, file: !5, scope: !6, type: !7, retainedNodes: !12)
+!35 = distinct !DISubprogram(name: "test_misc", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 3, file: !5, scope: !6, type: !7, retainedNodes: !2)
!36 = distinct !DILexicalBlock(scope: !35, file: !5, line: 137, column: 2)
!37 = !DILocation(line: 430, column: 44, scope: !36)
!38 = !DILocation(line: 540, column: 44, scope: !36)
-!39 = distinct !DISubprogram(name: "test_scalar_Steps", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 3, file: !5, scope: !6, type: !7, retainedNodes: !12)
+!39 = distinct !DISubprogram(name: "test_scalar_Steps", line: 3, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 3, file: !5, scope: !6, type: !7, retainedNodes: !2)
!40 = distinct !DILexicalBlock(scope: !39, file: !5, line: 137, column: 2)
!41 = !DILocation(line: 650, column: 44, scope: !40)
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
index fe230fa..b72cbd3 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-sink-replicate-region.ll
@@ -49,6 +49,8 @@ define void @sink_replicate_region_1(i32 %x, ptr %ptr, ptr noalias %dst) optsize
; CHECK-NEXT: loop.0:
; CHECK-NEXT: WIDEN-CAST ir<%conv> = sext vp<[[PRED1]]> to i32
; CHECK-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%0>, ir<%conv>
+; CHECK-NEXT: WIDEN ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
+; CHECK-NEXT: WIDEN ir<%add> = add ir<%conv>, ir<%rem>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: <xVFxUF> pred.store: {
@@ -57,9 +59,7 @@ define void @sink_replicate_region_1(i32 %x, ptr %ptr, ptr noalias %dst) optsize
; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
-; CHECK-NEXT: REPLICATE ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS]]>
-; CHECK-NEXT: REPLICATE ir<%add> = add ir<%conv>, ir<%rem>
; CHECK-NEXT: REPLICATE store ir<%add>, ir<%gep.dst>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
@@ -293,27 +293,44 @@ define void @sink_replicate_region_4_requires_split_at_end_of_block(i32 %x, ptr
; CHECK-NEXT: loop.0:
; CHECK-NEXT: WIDEN-CAST ir<%conv> = sext vp<[[PRED]]> to i32
; CHECK-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%0>, ir<%conv>
-; CHECK-NEXT: Successor(s): pred.store
+; CHECK-NEXT: WIDEN ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
+; CHECK-NEXT: Successor(s): pred.load
; CHECK-EMPTY:
-; CHECK: <xVFxUF> pred.store: {
-; CHECK-NEXT: pred.store.entry:
+; CHECK: <xVFxUF> pred.load: {
+; CHECK-NEXT: pred.load.entry:
; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK]]>
-; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
+; CHECK-NEXT: Successor(s): pred.load.if, pred.load.continue
; CHECK-EMPTY:
-; CHECK: pred.store.if:
-; CHECK-NEXT: REPLICATE ir<%lv.2> = load ir<%gep>
-; CHECK-NEXT: REPLICATE ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
-; CHECK-NEXT: REPLICATE ir<%conv.lv.2> = sext ir<%lv.2>
-; CHECK-NEXT: REPLICATE ir<%add.1> = add ir<%conv>, ir<%rem>
-; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS]]>
-; CHECK-NEXT: REPLICATE ir<%add> = add ir<%add.1>, ir<%conv.lv.2>
-; CHECK-NEXT: REPLICATE store ir<%add>, ir<%gep.dst>
-; CHECK-NEXT: Successor(s): pred.store.continue
+; CHECK: pred.load.if:
+; CHECK-NEXT: REPLICATE ir<%lv.2> = load ir<%gep> (S->V)
+; CHECK-NEXT: Successor(s): pred.load.continue
; CHECK-EMPTY:
-; CHECK: pred.store.continue:
+; CHECK: pred.load.continue:
+; CHECK-NEXT: PHI-PREDICATED-INSTRUCTION vp<%9> = ir<%lv.2>
; CHECK-NEXT: No successors
; CHECK-NEXT: }
-; CHECK-NEXT: Successor(s): loop.2
+; CHECK-NEXT: Successor(s): loop.1
+; CHECK-EMPTY:
+; CHECK-NEXT: loop.1:
+; CHECK-NEXT: WIDEN ir<%add.1> = add ir<%conv>, ir<%rem>
+; CHECK-NEXT: WIDEN-CAST ir<%conv.lv.2> = sext vp<%9> to i32
+; CHECK-NEXT: WIDEN ir<%add> = add ir<%add.1>, ir<%conv.lv.2>
+; CHECK-NEXT: Successor(s): pred.store
+; CHECK-EMPTY:
+; CHECK-NEXT: <xVFxUF> pred.store: {
+; CHECK-NEXT: pred.store.entry:
+; CHECK-NEXT: BRANCH-ON-MASK vp<[[MASK]]>
+; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue
+; CHECK-EMPTY:
+; CHECK-NEXT: pred.store.if:
+; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[STEPS]]>
+; CHECK-NEXT: REPLICATE store ir<%add>, ir<%gep.dst>
+; CHECK-NEXT: Successor(s): pred.store.continue
+; CHECK-EMPTY:
+; CHECK-NEXT: pred.store.continue:
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): loop.2
; CHECK-EMPTY:
; CHECK: loop.2:
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT:%.+]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
@@ -377,6 +394,7 @@ define void @sink_replicate_region_after_replicate_region(ptr %ptr, ptr noalias
; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION ir<0>, ir<1>, vp<[[VF]]>
; CHECK-NEXT: EMIT vp<[[MASK:%.+]]> = icmp ule ir<%iv>, vp<[[BTC]]>
; CHECK-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%recur>, ir<%recur.next>
+; CHECK-NEXT: WIDEN ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: <xVFxUF> pred.store: {
@@ -386,7 +404,6 @@ define void @sink_replicate_region_after_replicate_region(ptr %ptr, ptr noalias
; CHECK-EMPTY:
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>
-; CHECK-NEXT: REPLICATE ir<%rem> = srem vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: REPLICATE ir<%rem.div> = sdiv ir<20>, ir<%rem>
; CHECK-NEXT: REPLICATE ir<%gep> = getelementptr ir<%ptr>, vp<[[STEPS]]>
; CHECK-NEXT: REPLICATE store ir<%rem.div>, ir<%gep>
@@ -457,6 +474,7 @@ define void @need_new_block_after_sinking_pr56146(i32 %x, ptr %src, ptr noalias
; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp ule vp<[[WIDE_IV]]>, vp<[[BTC]]>
; CHECK-NEXT: CLONE ir<[[L]]> = load ir<%src>
; CHECK-NEXT: EMIT vp<[[SPLICE:%.+]]> = first-order splice ir<%.pn>, ir<[[L]]>
+; CHECK-NEXT: WIDEN ir<%val> = sdiv vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: Successor(s): pred.store
; CHECK-EMPTY:
; CHECK-NEXT: <xVFxUF> pred.store: {
@@ -467,7 +485,6 @@ define void @need_new_block_after_sinking_pr56146(i32 %x, ptr %src, ptr noalias
; CHECK-NEXT: pred.store.if:
; CHECK-NEXT: vp<[[SCALAR_STEPS:%.+]]> = SCALAR-STEPS vp<[[DERIVED_IV]]>, ir<1>, vp<[[VF]]>
; CHECK-NEXT: REPLICATE ir<%gep.dst> = getelementptr ir<%dst>, vp<[[SCALAR_STEPS]]>
-; CHECK-NEXT: REPLICATE ir<%val> = sdiv vp<[[SPLICE]]>, ir<%x>
; CHECK-NEXT: REPLICATE store ir<%val>, ir<%gep.dst>
; CHECK-NEXT: Successor(s): pred.store.continue
; CHECK-EMPTY:
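; Underlying IR shape for these plans (hypothetical names, minimal sketch):
; a first-order recurrence feeding a divide. The divide is now widened once
; in the main vector body rather than replicated per lane inside the
; predicated region; only the address and the store stay under the mask:
;   %recur = phi i32 [ 0, %entry ], [ %recur.next, %loop ]
;   %rem   = srem i32 %recur, %x          ; WIDEN ir<%rem> in the plans above
;   %gep   = getelementptr i32, ptr %dst, i64 %iv
;   store i32 %rem, ptr %gep              ; still REPLICATE, masked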
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
index 8a57973..372876c 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence-with-uniform-ops.ll
@@ -134,22 +134,18 @@ define i16 @for_phi_removed(ptr %src) {
; UNROLL-NO-IC: [[VECTOR_BODY]]:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; UNROLL-NO-IC-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; UNROLL-NO-IC-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
-; UNROLL-NO-IC-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; UNROLL-NO-IC-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = select i1 [[TMP4]], <4 x i16> splat (i16 1), <4 x i16> zeroinitializer
+; UNROLL-NO-IC-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; UNROLL-NO-IC-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i16 1, i16 0
; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; UNROLL-NO-IC-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 104
; UNROLL-NO-IC-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; UNROLL-NO-IC: [[MIDDLE_BLOCK]]:
-; UNROLL-NO-IC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
; UNROLL-NO-IC-NEXT: br label %[[SCALAR_PH:.*]]
; UNROLL-NO-IC: [[SCALAR_PH]]:
; UNROLL-NO-IC-NEXT: br label %[[LOOP:.*]]
; UNROLL-NO-IC: [[LOOP]]:
; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i16 [ 104, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; UNROLL-NO-IC-NEXT: [[P:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
+; UNROLL-NO-IC-NEXT: [[P:%.*]] = phi i16 [ [[TMP2]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
; UNROLL-NO-IC-NEXT: [[L:%.*]] = load i32, ptr [[SRC]], align 4
; UNROLL-NO-IC-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; UNROLL-NO-IC-NEXT: [[SEL]] = select i1 [[C]], i16 1, i16 0
@@ -200,22 +196,18 @@ define i16 @for_phi_removed(ptr %src) {
; SINK-AFTER: [[VECTOR_BODY]]:
; SINK-AFTER-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; SINK-AFTER-NEXT: [[TMP0:%.*]] = load i32, ptr [[SRC]], align 4
-; SINK-AFTER-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
-; SINK-AFTER-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; SINK-AFTER-NEXT: [[TMP1:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; SINK-AFTER-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; SINK-AFTER-NEXT: [[TMP2:%.*]] = select i1 [[TMP4]], <4 x i16> splat (i16 1), <4 x i16> zeroinitializer
+; SINK-AFTER-NEXT: [[TMP1:%.*]] = icmp eq i32 [[TMP0]], 0
+; SINK-AFTER-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i16 1, i16 0
; SINK-AFTER-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; SINK-AFTER-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 108
; SINK-AFTER-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; SINK-AFTER: [[MIDDLE_BLOCK]]:
-; SINK-AFTER-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i16> [[TMP2]], i32 3
; SINK-AFTER-NEXT: br label %[[SCALAR_PH:.*]]
; SINK-AFTER: [[SCALAR_PH]]:
; SINK-AFTER-NEXT: br label %[[LOOP:.*]]
; SINK-AFTER: [[LOOP]]:
; SINK-AFTER-NEXT: [[IV:%.*]] = phi i16 [ 108, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; SINK-AFTER-NEXT: [[P:%.*]] = phi i16 [ [[VECTOR_RECUR_EXTRACT]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
+; SINK-AFTER-NEXT: [[P:%.*]] = phi i16 [ [[TMP2]], %[[SCALAR_PH]] ], [ [[SEL:%.*]], %[[LOOP]] ]
; SINK-AFTER-NEXT: [[L:%.*]] = load i32, ptr [[SRC]], align 4
; SINK-AFTER-NEXT: [[C:%.*]] = icmp eq i32 [[L]], 0
; SINK-AFTER-NEXT: [[SEL]] = select i1 [[C]], i16 1, i16 0
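; Sketch of the narrowing checked above (assumed shapes): the loaded value is
; uniform across lanes, so the splat / vector select / extract sequence
; collapses to a single scalar computation that feeds the scalar phi directly:
;   %l = load i32, ptr %src           ; invariant address => uniform value
;   %c = icmp eq i32 %l, 0
;   %s = select i1 %c, i16 1, i16 0   ; replaces splat + <4 x i16> select + extract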
diff --git a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
index 7b0c366..440309d 100644
--- a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
+++ b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
@@ -153,3 +153,79 @@ loop:
exit:
ret void
}
+
+define void @narrow_widen_store_user(i32 %x, ptr noalias %A, ptr noalias %B) {
+; VF4IC1-LABEL: define void @narrow_widen_store_user(
+; VF4IC1-SAME: i32 [[X:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; VF4IC1-NEXT: [[ENTRY:.*:]]
+; VF4IC1-NEXT: br label %[[VECTOR_PH:.*]]
+; VF4IC1: [[VECTOR_PH]]:
+; VF4IC1-NEXT: [[TMP0:%.*]] = add i32 [[X]], 1
+; VF4IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
+; VF4IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; VF4IC1-NEXT: [[TMP5:%.*]] = mul i32 [[TMP0]], 3
+; VF4IC1-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[TMP5]], i64 0
+; VF4IC1-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer
+; VF4IC1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF4IC1: [[VECTOR_BODY]]:
+; VF4IC1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; VF4IC1-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
+; VF4IC1-NEXT: store <4 x i32> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 4
+; VF4IC1-NEXT: store <4 x i32> [[TMP1]], ptr [[TMP3]], align 4
+; VF4IC1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF4IC1-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
+; VF4IC1-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF4IC1: [[MIDDLE_BLOCK]]:
+; VF4IC1-NEXT: br label %[[EXIT:.*]]
+; VF4IC1: [[EXIT]]:
+; VF4IC1-NEXT: ret void
+;
+; VF2IC2-LABEL: define void @narrow_widen_store_user(
+; VF2IC2-SAME: i32 [[X:%.*]], ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) {
+; VF2IC2-NEXT: [[ENTRY:.*:]]
+; VF2IC2-NEXT: br label %[[VECTOR_PH:.*]]
+; VF2IC2: [[VECTOR_PH]]:
+; VF2IC2-NEXT: [[TMP0:%.*]] = add i32 [[X]], 1
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[TMP0]], i64 0
+; VF2IC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: [[TMP7:%.*]] = mul i32 [[TMP0]], 3
+; VF2IC2-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i32> poison, i32 [[TMP7]], i64 0
+; VF2IC2-NEXT: [[TMP1:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT1]], <2 x i32> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF2IC2: [[VECTOR_BODY]]:
+; VF2IC2-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF2IC2-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i32 [[INDEX]]
+; VF2IC2-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[B]], i32 [[INDEX]]
+; VF2IC2-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[TMP2]], i32 2
+; VF2IC2-NEXT: store <2 x i32> [[BROADCAST_SPLAT]], ptr [[TMP2]], align 4
+; VF2IC2-NEXT: store <2 x i32> [[BROADCAST_SPLAT]], ptr [[TMP4]], align 4
+; VF2IC2-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP3]], i32 2
+; VF2IC2-NEXT: store <2 x i32> [[TMP1]], ptr [[TMP3]], align 4
+; VF2IC2-NEXT: store <2 x i32> [[TMP1]], ptr [[TMP5]], align 4
+; VF2IC2-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; VF2IC2-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1024
+; VF2IC2-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF2IC2: [[MIDDLE_BLOCK]]:
+; VF2IC2-NEXT: br label %[[EXIT:.*]]
+; VF2IC2: [[EXIT]]:
+; VF2IC2-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.A = getelementptr i32, ptr %A, i32 %iv
+ %gep.B = getelementptr i32, ptr %B, i32 %iv
+ %wide.add = add i32 %x, 1
+ %wide.mul = mul i32 %wide.add, 3
+ store i32 %wide.add, ptr %gep.A
+ store i32 %wide.mul, ptr %gep.B
+ %iv.next = add i32 %iv, 1
+ %ec = icmp ne i32 %iv.next, 1024
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/pr50686.ll b/llvm/test/Transforms/LoopVectorize/pr50686.ll
index 878fbec..be9110c 100644
--- a/llvm/test/Transforms/LoopVectorize/pr50686.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr50686.ll
@@ -18,20 +18,16 @@ define void @m(ptr nocapture %p, ptr nocapture %p2, i32 %q) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[P2]], align 4, !alias.scope [[META0:![0-9]+]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = sub nsw <4 x i32> zeroinitializer, [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ARRAYIDX9_1]], align 4, !alias.scope [[META0]]
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[TMP3]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = sub nsw <4 x i32> [[TMP2]], [[BROADCAST_SPLAT3]]
-; CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ARRAYIDX9_2]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[P2]], align 4, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = sub nsw i32 0, [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX9_1]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP3:%.*]] = sub nsw i32 [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ARRAYIDX9_2]], align 4, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP5:%.*]] = sub nsw i32 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP5]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT4]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> [[TMP4]], [[BROADCAST_SPLAT5]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[INDEX]]
-; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP7]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
+; CHECK-NEXT: store <4 x i32> [[BROADCAST_SPLAT5]], ptr [[TMP7]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 60
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
index e160a15..bba459f 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
@@ -1140,18 +1140,14 @@ define void @test_vector_tc_eq_16(ptr %A) {
; VF8UF2-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 16
; VF8UF2-NEXT: br label %[[VECTOR_BODY:.*]]
; VF8UF2: [[VECTOR_BODY]]:
-; VF8UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; VF8UF2-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
-; VF8UF2-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 8
-; VF8UF2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[NEXT_GEP]], align 1
+; VF8UF2-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i32 8
+; VF8UF2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[A]], align 1
; VF8UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP1]], align 1
; VF8UF2-NEXT: [[TMP2:%.*]] = add nsw <8 x i8> [[WIDE_LOAD]], splat (i8 10)
; VF8UF2-NEXT: [[TMP3:%.*]] = add nsw <8 x i8> [[WIDE_LOAD1]], splat (i8 10)
-; VF8UF2-NEXT: store <8 x i8> [[TMP2]], ptr [[NEXT_GEP]], align 1
+; VF8UF2-NEXT: store <8 x i8> [[TMP2]], ptr [[A]], align 1
; VF8UF2-NEXT: store <8 x i8> [[TMP3]], ptr [[TMP1]], align 1
-; VF8UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; VF8UF2-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; VF8UF2-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; VF8UF2-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF8UF2: [[MIDDLE_BLOCK]]:
; VF8UF2-NEXT: br label %[[SCALAR_PH:.*]]
; VF8UF2: [[SCALAR_PH]]:
@@ -1165,7 +1161,7 @@ define void @test_vector_tc_eq_16(ptr %A) {
; VF8UF2-NEXT: store i8 [[ADD]], ptr [[P_SRC]], align 1
; VF8UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; VF8UF2-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 17
-; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF8UF2-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; VF8UF2: [[EXIT]]:
; VF8UF2-NEXT: ret void
;
@@ -1177,14 +1173,10 @@ define void @test_vector_tc_eq_16(ptr %A) {
; VF16UF1-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A]], i64 16
; VF16UF1-NEXT: br label %[[VECTOR_BODY:.*]]
; VF16UF1: [[VECTOR_BODY]]:
-; VF16UF1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; VF16UF1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
-; VF16UF1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
+; VF16UF1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[A]], align 1
; VF16UF1-NEXT: [[TMP1:%.*]] = add nsw <16 x i8> [[WIDE_LOAD]], splat (i8 10)
-; VF16UF1-NEXT: store <16 x i8> [[TMP1]], ptr [[NEXT_GEP]], align 1
-; VF16UF1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; VF16UF1-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
-; VF16UF1-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; VF16UF1-NEXT: store <16 x i8> [[TMP1]], ptr [[A]], align 1
+; VF16UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF16UF1: [[MIDDLE_BLOCK]]:
; VF16UF1-NEXT: br label %[[SCALAR_PH:.*]]
; VF16UF1: [[SCALAR_PH]]:
@@ -1198,7 +1190,7 @@ define void @test_vector_tc_eq_16(ptr %A) {
; VF16UF1-NEXT: store i8 [[ADD]], ptr [[P_SRC]], align 1
; VF16UF1-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
; VF16UF1-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], 17
-; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
+; VF16UF1-NEXT: br i1 [[CMP]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; VF16UF1: [[EXIT]]:
; VF16UF1-NEXT: ret void
;
@@ -1232,12 +1224,10 @@ exit:
; VF8UF2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; VF8UF2: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"}
; VF8UF2: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
-; VF8UF2: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; VF8UF2: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; VF8UF2: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
;.
; VF16UF1: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; VF16UF1: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"}
; VF16UF1: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
-; VF16UF1: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
-; VF16UF1: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; VF16UF1: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
;.
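; Sketch (assumption, not from the patch): when the whole trip count fits in
; one vector iteration, the backedge is removed outright, the induction phi
; disappears, and addresses fold to the loop-invariant base pointer:
;   %wide.load = load <16 x i8>, ptr %A, align 1   ; was: gep %A, %index
;   ...
;   br label %middle.block                         ; no compare, no backedge
; With one loop branch gone, the scalar remainder reuses the next free
; metadata id, hence LOOP4 renumbering to LOOP3 in the checks above.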
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
index 2dd6a04..3161a0d 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-sink-scalars-and-merge.ll
@@ -1,6 +1,6 @@
; REQUIRES: asserts
-; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -debug -disable-output %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 -force-widen-divrem-via-safe-divisor=0 -debug -disable-output %s 2>&1 | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
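; For context, a sketch of the safe-divisor strategy that
; -force-widen-divrem-via-safe-divisor=0 disables (operand names are
; hypothetical): masked-off lanes receive a divisor of 1 so the widened
; division cannot trap:
;   %safe.div = select <2 x i1> %mask, <2 x i32> %d, <2 x i32> splat (i32 1)
;   %res      = sdiv <2 x i32> %a, %safe.div
; With the strategy off, this test keeps exercising divides sunk into
; predicated replicate regions instead.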
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll
index aaabd18..618ec86 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/remarks-inlining.ll
@@ -118,18 +118,18 @@ declare <2 x float> @llvm.matrix.transpose(<2 x float>, i32, i32)
!4 = !{i32 2, !"Debug Info Version", i32 3}
!5 = distinct !DISubprogram(name: "load_fn", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
!17 = !DIFile(filename: "toplevel.c", directory: "/test")
-!16 = distinct !DISubprogram(name: "toplevel", scope: !1, file: !17, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!16 = distinct !DISubprogram(name: "toplevel", scope: !1, file: !17, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!18 = !DIFile(filename: "assign.h", directory: "/test")
-!19 = distinct !DISubprogram(name: "assign", scope: !1, file: !18, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!19 = distinct !DISubprogram(name: "assign", scope: !1, file: !18, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!20 = !DIFile(filename: "add.h", directory: "/test")
-!21 = distinct !DISubprogram(name: "add_fn", scope: !1, file: !20, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!21 = distinct !DISubprogram(name: "add_fn", scope: !1, file: !20, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!22 = !DIFile(filename: "store.h", directory: "/test")
-!23 = distinct !DISubprogram(name: "store_fn", scope: !1, file: !22, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!23 = distinct !DISubprogram(name: "store_fn", scope: !1, file: !22, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!24 = !DIFile(filename: "transpose.h", directory: "/test")
-!25 = distinct !DISubprogram(name: "transpose", scope: !1, file: !24, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!25 = distinct !DISubprogram(name: "transpose", scope: !1, file: !24, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!6 = !DISubroutineType(types: !7)
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll
index 628ff08..ff41c57 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/remarks.ll
@@ -163,26 +163,26 @@ declare void @llvm.matrix.column.major.store(<9 x double>, ptr, i64, i1, i32, i3
!19 = !DILocation(line: 10, column: 20, scope: !5)
!20 = !DILocation(line: 10, column: 10, scope: !5)
-!21 = distinct !DISubprogram(name: "fn2", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!21 = distinct !DISubprogram(name: "fn2", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!22 = !DILocation(line: 30, column: 20, scope: !21)
-!23 = distinct !DISubprogram(name: "fn3", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!23 = distinct !DISubprogram(name: "fn3", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!24 = !DILocation(line: 40, column: 20, scope: !23)
-!25 = distinct !DISubprogram(name: "fn4", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!25 = distinct !DISubprogram(name: "fn4", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!26 = !DILocation(line: 50, column: 20, scope: !25)
-!27 = distinct !DISubprogram(name: "fn5", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!27 = distinct !DISubprogram(name: "fn5", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!28 = !DILocation(line: 60, column: 20, scope: !27)
-!29 = distinct !DISubprogram(name: "fn6", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!29 = distinct !DISubprogram(name: "fn6", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!30 = !DILocation(line: 70, column: 20, scope: !29)
-!31 = distinct !DISubprogram(name: "fn7", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!31 = distinct !DISubprogram(name: "fn7", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!32 = !DILocation(line: 80, column: 20, scope: !31)
-!33 = distinct !DISubprogram(name: "fn8", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!33 = distinct !DISubprogram(name: "fn8", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!34 = !DILocation(line: 90, column: 20, scope: !33)
-!35 = distinct !DISubprogram(name: "fn9", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !12)
+!35 = distinct !DISubprogram(name: "fn9", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, retainedNodes: !2)
!36 = !DILocation(line: 100, column: 20, scope: !35)
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index e3765ed..75276c0 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -106,23 +106,6 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP6:%.*]] = or disjoint i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> poison, i64 [[INDEX]], i64 0
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP4]], i64 1
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i64> poison, i64 [[TMP5]], i64 0
-; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i64> [[TMP9]], i64 [[TMP6]], i64 1
-; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <2 x i64> [[TMP8]], splat (i64 225)
-; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP11]], i64 0
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP11]], i64 1
-; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <2 x i64> [[TMP10]], splat (i64 225)
-; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP12]], i64 0
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP12]], i64 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP13]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP14]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP15]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP16]])
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP17]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP17]], align 8, !alias.scope [[META0:![0-9]+]]
@@ -182,23 +165,6 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.1:
; CHECK-NEXT: [[INDEX_1:%.*]] = phi i64 [ 0, [[VECTOR_PH_1]] ], [ [[INDEX_NEXT_1:%.*]], [[VECTOR_BODY_1]] ]
; CHECK-NEXT: [[TMP33:%.*]] = add nuw nsw i64 [[INDEX_1]], 15
-; CHECK-NEXT: [[TMP34:%.*]] = add nuw nsw i64 [[INDEX_1]], 16
-; CHECK-NEXT: [[TMP35:%.*]] = insertelement <2 x i64> poison, i64 [[TMP33]], i64 0
-; CHECK-NEXT: [[TMP36:%.*]] = insertelement <2 x i64> [[TMP35]], i64 [[TMP34]], i64 1
-; CHECK-NEXT: [[TMP37:%.*]] = add nuw nsw i64 [[INDEX_1]], 17
-; CHECK-NEXT: [[TMP38:%.*]] = add nuw nsw i64 [[INDEX_1]], 18
-; CHECK-NEXT: [[TMP39:%.*]] = insertelement <2 x i64> poison, i64 [[TMP37]], i64 0
-; CHECK-NEXT: [[TMP40:%.*]] = insertelement <2 x i64> [[TMP39]], i64 [[TMP38]], i64 1
-; CHECK-NEXT: [[TMP41:%.*]] = icmp ult <2 x i64> [[TMP36]], splat (i64 225)
-; CHECK-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP41]], i64 0
-; CHECK-NEXT: [[TMP44:%.*]] = extractelement <2 x i1> [[TMP41]], i64 1
-; CHECK-NEXT: [[TMP42:%.*]] = icmp ult <2 x i64> [[TMP40]], splat (i64 225)
-; CHECK-NEXT: [[TMP45:%.*]] = extractelement <2 x i1> [[TMP42]], i64 0
-; CHECK-NEXT: [[TMP46:%.*]] = extractelement <2 x i1> [[TMP42]], i64 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP43]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP44]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP45]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP46]])
; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP33]]
; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP47]], i64 16
; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr [[TMP47]], align 8, !alias.scope [[META0]]
@@ -259,23 +225,6 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.2:
; CHECK-NEXT: [[INDEX_2:%.*]] = phi i64 [ 0, [[VECTOR_PH_2]] ], [ [[INDEX_NEXT_2:%.*]], [[VECTOR_BODY_2]] ]
; CHECK-NEXT: [[TMP64:%.*]] = add nuw nsw i64 [[INDEX_2]], 30
-; CHECK-NEXT: [[TMP65:%.*]] = add nuw nsw i64 [[INDEX_2]], 31
-; CHECK-NEXT: [[TMP66:%.*]] = insertelement <2 x i64> poison, i64 [[TMP64]], i64 0
-; CHECK-NEXT: [[TMP67:%.*]] = insertelement <2 x i64> [[TMP66]], i64 [[TMP65]], i64 1
-; CHECK-NEXT: [[TMP68:%.*]] = add nuw nsw i64 [[INDEX_2]], 32
-; CHECK-NEXT: [[TMP69:%.*]] = add nuw nsw i64 [[INDEX_2]], 33
-; CHECK-NEXT: [[TMP70:%.*]] = insertelement <2 x i64> poison, i64 [[TMP68]], i64 0
-; CHECK-NEXT: [[TMP71:%.*]] = insertelement <2 x i64> [[TMP70]], i64 [[TMP69]], i64 1
-; CHECK-NEXT: [[TMP72:%.*]] = icmp ult <2 x i64> [[TMP67]], splat (i64 225)
-; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i1> [[TMP72]], i64 0
-; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i1> [[TMP72]], i64 1
-; CHECK-NEXT: [[TMP73:%.*]] = icmp ult <2 x i64> [[TMP71]], splat (i64 225)
-; CHECK-NEXT: [[TMP76:%.*]] = extractelement <2 x i1> [[TMP73]], i64 0
-; CHECK-NEXT: [[TMP77:%.*]] = extractelement <2 x i1> [[TMP73]], i64 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP74]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP75]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP76]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP77]])
; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP64]]
; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP78]], i64 16
; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr [[TMP78]], align 8, !alias.scope [[META0]]
@@ -336,23 +285,6 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.3:
; CHECK-NEXT: [[INDEX_3:%.*]] = phi i64 [ 0, [[VECTOR_PH_3]] ], [ [[INDEX_NEXT_3:%.*]], [[VECTOR_BODY_3]] ]
; CHECK-NEXT: [[TMP95:%.*]] = add nuw nsw i64 [[INDEX_3]], 45
-; CHECK-NEXT: [[TMP96:%.*]] = add nuw nsw i64 [[INDEX_3]], 46
-; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i64> poison, i64 [[TMP95]], i64 0
-; CHECK-NEXT: [[TMP98:%.*]] = insertelement <2 x i64> [[TMP97]], i64 [[TMP96]], i64 1
-; CHECK-NEXT: [[TMP99:%.*]] = add nuw nsw i64 [[INDEX_3]], 47
-; CHECK-NEXT: [[TMP100:%.*]] = add nuw nsw i64 [[INDEX_3]], 48
-; CHECK-NEXT: [[TMP101:%.*]] = insertelement <2 x i64> poison, i64 [[TMP99]], i64 0
-; CHECK-NEXT: [[TMP102:%.*]] = insertelement <2 x i64> [[TMP101]], i64 [[TMP100]], i64 1
-; CHECK-NEXT: [[TMP103:%.*]] = icmp ult <2 x i64> [[TMP98]], splat (i64 225)
-; CHECK-NEXT: [[TMP105:%.*]] = extractelement <2 x i1> [[TMP103]], i64 0
-; CHECK-NEXT: [[TMP106:%.*]] = extractelement <2 x i1> [[TMP103]], i64 1
-; CHECK-NEXT: [[TMP104:%.*]] = icmp ult <2 x i64> [[TMP102]], splat (i64 225)
-; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i1> [[TMP104]], i64 0
-; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i1> [[TMP104]], i64 1
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP105]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP106]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP107]])
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP108]])
; CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds nuw double, ptr [[A]], i64 [[TMP95]]
; CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP109]], i64 16
; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr [[TMP109]], align 8, !alias.scope [[META0]]
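; Shape of the removed guards (sketch only): each vector of lane indices was
; compared against the bound and every lane fed its own llvm.assume:
;   %in.bounds = icmp ult <2 x i64> %ivs, splat (i64 225)
;   %lane0     = extractelement <2 x i1> %in.bounds, i64 0
;   tail call void @llvm.assume(i1 %lane0)
; Dropping them is behavior-preserving since llvm.assume only conveys an
; optimization hint, presumably one with no remaining users here.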
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
index e914979..fd7b75f 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
@@ -8,7 +8,6 @@ define i64 @std_find_i16_constant_offset_with_assumptions(ptr %first.coerce, i16
; CHECK-SAME: ptr [[FIRST_COERCE:%.*]], i16 noundef signext [[S:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[FIRST_COERCE]], i64 2) ]
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[FIRST_COERCE]], i64 256) ]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[S]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -133,15 +132,14 @@ define ptr @std_find_caller(ptr noundef %first, ptr noundef %last) {
; CHECK-LABEL: define noundef ptr @std_find_caller(
; CHECK-SAME: ptr noundef [[FIRST:%.*]], ptr noundef [[LAST:%.*]]) local_unnamed_addr #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
-; CHECK-NEXT: [[FIRST3:%.*]] = ptrtoint ptr [[FIRST]] to i64
-; CHECK-NEXT: [[LAST_I64:%.*]] = ptrtoint ptr [[LAST]] to i64
-; CHECK-NEXT: [[PTR_SUB:%.*]] = sub i64 [[LAST_I64]], [[FIRST3]]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[FIRST]], i64 2) ]
; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[LAST]], i64 2) ]
-; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[FIRST]], i64 [[PTR_SUB]]) ]
; CHECK-NEXT: [[PRE_I:%.*]] = icmp eq ptr [[FIRST]], [[LAST]]
; CHECK-NEXT: br i1 [[PRE_I]], label %[[STD_FIND_GENERIC_IMPL_EXIT:.*]], label %[[LOOP_HEADER_I_PREHEADER:.*]]
; CHECK: [[LOOP_HEADER_I_PREHEADER]]:
+; CHECK-NEXT: [[LAST_I64:%.*]] = ptrtoint ptr [[LAST]] to i64
+; CHECK-NEXT: [[FIRST3:%.*]] = ptrtoint ptr [[FIRST]] to i64
+; CHECK-NEXT: [[PTR_SUB:%.*]] = sub i64 [[LAST_I64]], [[FIRST3]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[FIRST]], i64 [[PTR_SUB]]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[LAST_I64]], -2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[FIRST3]]
diff --git a/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll
new file mode 100644
index 0000000..1d9cf6a
--- /dev/null
+++ b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/armpl.ll
@@ -0,0 +1,21 @@
+; REQUIRES: aarch64-registered-target
+; RUN: opt -S -passes=declare-runtime-libcalls -mtriple=aarch64-unknown-linux -mattr=+neon,+sve -vector-library=ArmPL < %s | FileCheck %s
+
+; CHECK: declare void @armpl_svsincos_f32_x(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 4 x i1>) [[ATTRS:#[0-9]+]]
+
+; CHECK: declare void @armpl_svsincos_f64_x(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 2 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_svsincospi_f32_x(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 4 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_svsincospi_f64_x(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16, <vscale x 2 x i1>) [[ATTRS]]
+
+; CHECK: declare void @armpl_vsincospiq_f32(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @armpl_vsincospiq_f64(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare aarch64_vector_pcs void @armpl_vsincosq_f32(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare aarch64_vector_pcs void @armpl_vsincosq_f64(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+
+; CHECK: attributes [[ATTRS]] = { nocallback nofree nosync nounwind willreturn memory(argmem: write) }
diff --git a/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll
new file mode 100644
index 0000000..2c69007
--- /dev/null
+++ b/llvm/test/Transforms/Util/DeclareRuntimeLibcalls/sleef.ll
@@ -0,0 +1,20 @@
+; REQUIRES: aarch64-registered-target
+; RUN: opt -S -passes=declare-runtime-libcalls -mtriple=aarch64-unknown-linux -mattr=+neon,+sve -vector-library=sleefgnuabi < %s | FileCheck %s
+
+; CHECK: declare void @_ZGVnN2vl8l8_sincos(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS:#[0-9]+]]
+
+; CHECK: declare void @_ZGVnN2vl8l8_sincospi(<2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVnN4vl4l4_sincosf(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVnN4vl4l4_sincospif(<4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl4l4_sincosf(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl4l4_sincospif(<vscale x 4 x float>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl8l8_sincos(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: declare void @_ZGVsNxvl8l8_sincospi(<vscale x 2 x double>, ptr noalias nonnull writeonly align 16, ptr noalias nonnull writeonly align 16) [[ATTRS]]
+
+; CHECK: attributes [[ATTRS]] = { nocallback nofree nosync nounwind willreturn memory(argmem: write) }
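; The mangled names follow the vector-function ABI, _ZGV<isa><mask><len><parms>_<name>.
; Decoding one check above (our reading of the VFABI rules):
; _ZGVnN2vl8l8_sincos = AdvSIMD ('n'), unmasked ('N'), 2 lanes, one vector
; argument ('v'), and two linear pointer arguments stepping by the element
; size ('l8' for double; 'l4' in the f32 variants); the '_ZGVsNx...' names
; are the SVE ('s'), scalable-length ('x') forms.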
diff --git a/llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll b/llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll
index a0fa79a..7fc7207 100644
--- a/llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll
+++ b/llvm/test/Transforms/Util/annotation-remarks-dbg-info.ll
@@ -72,5 +72,5 @@ entry:
!14 = !{!15}
!15 = !DILocalVariable(name: "a", arg: 1, scope: !7, file: !1, line: 1, type: !10)
!16 = !DILocation(line: 400, column: 3, scope: !7)
-!17 = distinct !DISubprogram(name: "test2", scope: !1, file: !1, line: 21, type: !8, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !14)
+!17 = distinct !DISubprogram(name: "test2", scope: !1, file: !1, line: 21, type: !8, scopeLine: 20, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !2)
!18 = !DILocation(line: 200, column: 3, scope: !17)
diff --git a/llvm/test/tools/llc/save-stats.ll b/llvm/test/tools/llc/save-stats.ll
index acb0367..a5769f8 100644
--- a/llvm/test/tools/llc/save-stats.ll
+++ b/llvm/test/tools/llc/save-stats.ll
@@ -1,10 +1,10 @@
; REQUIRES: asserts
-; REQUIRES: aarch64-registered-target
-; RUN: llc -mtriple=arm64-apple-macosx --save-stats=obj -o %t.s %s && cat %t.stats | FileCheck %s
-; RUN: llc -mtriple=arm64-apple-macosx --save-stats=cwd -o %t.s %s && cat %{t:stem}.tmp.stats | FileCheck %s
-; RUN: llc -mtriple=arm64-apple-macosx --save-stats -o %t.s %s && cat %{t:stem}.tmp.stats | FileCheck %s
-; RUN: not llc -mtriple=arm64-apple-macosx --save-stats=invalid -o %t.s %s 2>&1 | FileCheck %s --check-prefix=INVALID_ARG
+; RUN: rm -rf %t.dir && mkdir -p %t.dir && cd %t.dir
+; RUN: llc --save-stats=obj -o %t.s %s && cat %t.stats | FileCheck %s
+; RUN: llc --save-stats=cwd -o %t.s %s && cat %{t:stem}.tmp.stats | FileCheck %s
+; RUN: llc --save-stats -o %t.s %s && cat %{t:stem}.tmp.stats | FileCheck %s
+; RUN: not llc --save-stats=invalid -o %t.s %s 2>&1 | FileCheck %s --check-prefix=INVALID_ARG
; CHECK: {
; CHECK: "asm-printer.EmittedInsts":
diff --git a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
index 6dc64765..77b2f49 100644
--- a/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
+++ b/llvm/tools/llvm-exegesis/lib/X86/Target.cpp
@@ -28,7 +28,6 @@
#include "llvm/TargetParser/Host.h"
#include <memory>
-#include <string>
#include <vector>
#if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)) && \
!defined(_M_ARM64EC)
diff --git a/llvm/tools/llvm-gpu-loader/amdhsa.cpp b/llvm/tools/llvm-gpu-loader/amdhsa.cpp
index 5715058..fa9ee18 100644
--- a/llvm/tools/llvm-gpu-loader/amdhsa.cpp
+++ b/llvm/tools/llvm-gpu-loader/amdhsa.cpp
@@ -26,7 +26,6 @@
#include <cstdlib>
#include <cstring>
#include <thread>
-#include <tuple>
#include <utility>
// The implicit arguments of COV5 AMDGPU kernels.
diff --git a/llvm/tools/llvm-mc/Disassembler.h b/llvm/tools/llvm-mc/Disassembler.h
index dd8525d..76cee9e 100644
--- a/llvm/tools/llvm-mc/Disassembler.h
+++ b/llvm/tools/llvm-mc/Disassembler.h
@@ -14,8 +14,6 @@
#ifndef LLVM_TOOLS_LLVM_MC_DISASSEMBLER_H
#define LLVM_TOOLS_LLVM_MC_DISASSEMBLER_H
-#include <string>
-
namespace llvm {
class MemoryBuffer;
diff --git a/llvm/tools/llvm-stress/llvm-stress.cpp b/llvm/tools/llvm-stress/llvm-stress.cpp
index 133812e..2fe5d6b 100644
--- a/llvm/tools/llvm-stress/llvm-stress.cpp
+++ b/llvm/tools/llvm-stress/llvm-stress.cpp
@@ -40,7 +40,6 @@
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
diff --git a/llvm/tools/llvm-xray/trie-node.h b/llvm/tools/llvm-xray/trie-node.h
index b42b029..f96be59 100644
--- a/llvm/tools/llvm-xray/trie-node.h
+++ b/llvm/tools/llvm-xray/trie-node.h
@@ -15,7 +15,6 @@
#define LLVM_TOOLS_LLVM_XRAY_STACK_TRIE_H
#include <forward_list>
-#include <numeric>
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
diff --git a/llvm/unittests/ADT/CombinationGeneratorTest.cpp b/llvm/unittests/ADT/CombinationGeneratorTest.cpp
index f3e174a..219e18b 100644
--- a/llvm/unittests/ADT/CombinationGeneratorTest.cpp
+++ b/llvm/unittests/ADT/CombinationGeneratorTest.cpp
@@ -12,7 +12,6 @@
#include "llvm/Support/ErrorHandling.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include <algorithm>
#include <cstddef>
#include <iterator>
#include <vector>
diff --git a/llvm/unittests/ADT/DAGDeltaAlgorithmTest.cpp b/llvm/unittests/ADT/DAGDeltaAlgorithmTest.cpp
index 918a2e6..9f51490 100644
--- a/llvm/unittests/ADT/DAGDeltaAlgorithmTest.cpp
+++ b/llvm/unittests/ADT/DAGDeltaAlgorithmTest.cpp
@@ -9,7 +9,6 @@
#include "llvm/ADT/DAGDeltaAlgorithm.h"
#include "llvm/ADT/STLExtras.h"
#include "gtest/gtest.h"
-#include <algorithm>
#include <cstdarg>
using namespace llvm;
diff --git a/llvm/unittests/ADT/DeltaAlgorithmTest.cpp b/llvm/unittests/ADT/DeltaAlgorithmTest.cpp
index 24e18f4..530bd1c 100644
--- a/llvm/unittests/ADT/DeltaAlgorithmTest.cpp
+++ b/llvm/unittests/ADT/DeltaAlgorithmTest.cpp
@@ -9,7 +9,6 @@
#include "llvm/ADT/DeltaAlgorithm.h"
#include "llvm/ADT/STLExtras.h"
#include "gtest/gtest.h"
-#include <algorithm>
#include <cstdarg>
using namespace llvm;
diff --git a/llvm/unittests/ADT/STLForwardCompatTest.cpp b/llvm/unittests/ADT/STLForwardCompatTest.cpp
index c6ae6e3..d0092fd 100644
--- a/llvm/unittests/ADT/STLForwardCompatTest.cpp
+++ b/llvm/unittests/ADT/STLForwardCompatTest.cpp
@@ -11,7 +11,6 @@
#include "gtest/gtest.h"
#include <optional>
-#include <tuple>
#include <type_traits>
#include <utility>
diff --git a/llvm/unittests/ADT/SequenceTest.cpp b/llvm/unittests/ADT/SequenceTest.cpp
index 7b7dc85..ab50ad0 100644
--- a/llvm/unittests/ADT/SequenceTest.cpp
+++ b/llvm/unittests/ADT/SequenceTest.cpp
@@ -11,8 +11,9 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include <algorithm>
-#include <numeric>
+#include <iterator>
+#include <limits>
+#include <vector>
using namespace llvm;
diff --git a/llvm/unittests/Analysis/DomTreeUpdaterTest.cpp b/llvm/unittests/Analysis/DomTreeUpdaterTest.cpp
index cabfc2a..9f5fe57 100644
--- a/llvm/unittests/Analysis/DomTreeUpdaterTest.cpp
+++ b/llvm/unittests/Analysis/DomTreeUpdaterTest.cpp
@@ -17,7 +17,6 @@
#include "llvm/Support/SourceMgr.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "gtest/gtest.h"
-#include <algorithm>
using namespace llvm;
diff --git a/llvm/unittests/CodeGen/MFCommon.inc b/llvm/unittests/CodeGen/MFCommon.inc
index 0180ba0..783267a 100644
--- a/llvm/unittests/CodeGen/MFCommon.inc
+++ b/llvm/unittests/CodeGen/MFCommon.inc
@@ -76,8 +76,10 @@ public:
};
class BogusTargetInstrInfo : public TargetInstrInfo {
+ BogusRegisterInfo RegInfo;
+
public:
- BogusTargetInstrInfo() : TargetInstrInfo() {}
+ BogusTargetInstrInfo() : TargetInstrInfo(RegInfo) {}
};
class BogusSubtarget : public TargetSubtargetInfo {
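// Sketch of the updated contract (class names hypothetical): TargetInstrInfo
// now takes the TargetRegisterInfo by reference, so a target keeps the TRI as
// a member and hands it to the base. The base only binds the reference, so
// passing a member that is constructed after the base is fine:
//
//   class MyInstrInfo : public TargetInstrInfo {
//     MyRegisterInfo RI;                     // must outlive the base's ref
//   public:
//     MyInstrInfo() : TargetInstrInfo(RI) {}
//   };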
diff --git a/llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp b/llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp
index b40b61e..441344b 100644
--- a/llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/LibraryResolverTest.cpp
@@ -23,7 +23,6 @@
#include "gtest/gtest.h"
#include <algorithm>
-#include <optional>
#include <string>
#include <vector>
diff --git a/llvm/unittests/IR/VFABIDemanglerTest.cpp b/llvm/unittests/IR/VFABIDemanglerTest.cpp
index e30e0f8..7d94613 100644
--- a/llvm/unittests/IR/VFABIDemanglerTest.cpp
+++ b/llvm/unittests/IR/VFABIDemanglerTest.cpp
@@ -15,7 +15,6 @@
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"
-#include <optional>
using namespace llvm;
diff --git a/llvm/unittests/Support/SpecialCaseListTest.cpp b/llvm/unittests/Support/SpecialCaseListTest.cpp
index 750feda..812e0d3 100644
--- a/llvm/unittests/Support/SpecialCaseListTest.cpp
+++ b/llvm/unittests/Support/SpecialCaseListTest.cpp
@@ -308,43 +308,49 @@ TEST_F(SpecialCaseListTest, Version2) {
}
TEST_F(SpecialCaseListTest, DotSlash) {
- std::unique_ptr<SpecialCaseList> SCL2 = makeSpecialCaseList("[dot]\n"
- "fun:./foo\n"
- "src:./bar\n"
- "[not]\n"
- "fun:foo\n"
- "src:bar\n");
- std::unique_ptr<SpecialCaseList> SCL3 = makeSpecialCaseList("[dot]\n"
- "fun:./foo\n"
- "src:./bar\n"
- "[not]\n"
- "fun:foo\n"
- "src:bar\n",
- /*Version=*/3);
+ StringRef IgnoreList = "[dot]\n"
+ "fun:./foo\n"
+ "src:./bar\n"
+ "[not]\n"
+ "fun:foo\n"
+ "src:bar\n";
+ std::unique_ptr<SpecialCaseList> SCL2 = makeSpecialCaseList(IgnoreList);
+ std::unique_ptr<SpecialCaseList> SCL3 =
+ makeSpecialCaseList(IgnoreList, /*Version=*/3);
+ std::unique_ptr<SpecialCaseList> SCL4 = makeSpecialCaseList(IgnoreList,
+ /*Version=*/4);
EXPECT_TRUE(SCL2->inSection("dot", "fun", "./foo"));
EXPECT_TRUE(SCL3->inSection("dot", "fun", "./foo"));
+ EXPECT_TRUE(SCL4->inSection("dot", "fun", "./foo"));
EXPECT_FALSE(SCL2->inSection("dot", "fun", "foo"));
EXPECT_FALSE(SCL3->inSection("dot", "fun", "foo"));
+ EXPECT_FALSE(SCL4->inSection("dot", "fun", "foo"));
EXPECT_TRUE(SCL2->inSection("dot", "src", "./bar"));
EXPECT_FALSE(SCL3->inSection("dot", "src", "./bar"));
+ EXPECT_FALSE(SCL4->inSection("dot", "src", "./bar"));
EXPECT_FALSE(SCL2->inSection("dot", "src", "bar"));
EXPECT_FALSE(SCL3->inSection("dot", "src", "bar"));
+ EXPECT_FALSE(SCL4->inSection("dot", "src", "bar"));
EXPECT_FALSE(SCL2->inSection("not", "fun", "./foo"));
EXPECT_FALSE(SCL3->inSection("not", "fun", "./foo"));
+ EXPECT_FALSE(SCL4->inSection("not", "fun", "./foo"));
EXPECT_TRUE(SCL2->inSection("not", "fun", "foo"));
EXPECT_TRUE(SCL3->inSection("not", "fun", "foo"));
+ EXPECT_TRUE(SCL4->inSection("not", "fun", "foo"));
EXPECT_FALSE(SCL2->inSection("not", "src", "./bar"));
EXPECT_TRUE(SCL3->inSection("not", "src", "./bar"));
+ EXPECT_TRUE(SCL4->inSection("not", "src", "./bar"));
EXPECT_TRUE(SCL2->inSection("not", "src", "bar"));
EXPECT_TRUE(SCL3->inSection("not", "src", "bar"));
+ EXPECT_TRUE(SCL4->inSection("not", "src", "bar"));
}
TEST_F(SpecialCaseListTest, LinesInSection) {
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index bfc1275..c55cd94 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -1204,6 +1204,7 @@ Experimental extensions
zvfofp8min 0.2
zvkgs 0.7
zvqdotq 0.0
+ smpmpmt 0.6
svukte 0.3
xqccmp 0.3
xqcia 0.7
diff --git a/llvm/unittests/TextAPI/TextStubHelpers.h b/llvm/unittests/TextAPI/TextStubHelpers.h
index 7c9c74a..87ca7e1 100644
--- a/llvm/unittests/TextAPI/TextStubHelpers.h
+++ b/llvm/unittests/TextAPI/TextStubHelpers.h
@@ -8,7 +8,6 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/TextAPI/InterfaceFile.h"
-#include <algorithm>
#include <string>
#ifndef TEXT_STUB_HELPERS_H
diff --git a/llvm/utils/KillTheDoctor/KillTheDoctor.cpp b/llvm/utils/KillTheDoctor/KillTheDoctor.cpp
index 4f642f8..0495560 100644
--- a/llvm/utils/KillTheDoctor/KillTheDoctor.cpp
+++ b/llvm/utils/KillTheDoctor/KillTheDoctor.cpp
@@ -43,7 +43,6 @@
#include "llvm/Support/WindowsError.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/type_traits.h"
-#include <algorithm>
#include <cerrno>
#include <cstdlib>
#include <map>
diff --git a/llvm/utils/TableGen/Common/CodeGenHwModes.h b/llvm/utils/TableGen/Common/CodeGenHwModes.h
index 55062b6..7e45f8e 100644
--- a/llvm/utils/TableGen/Common/CodeGenHwModes.h
+++ b/llvm/utils/TableGen/Common/CodeGenHwModes.h
@@ -15,7 +15,6 @@
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <map>
-#include <string>
#include <utility>
#include <vector>
diff --git a/llvm/utils/TableGen/Common/CodeGenTarget.h b/llvm/utils/TableGen/Common/CodeGenTarget.h
index b6f9d42..c1ed70d 100644
--- a/llvm/utils/TableGen/Common/CodeGenTarget.h
+++ b/llvm/utils/TableGen/Common/CodeGenTarget.h
@@ -28,7 +28,6 @@
#include "llvm/CodeGenTypes/MachineValueType.h"
#include <cassert>
#include <memory>
-#include <optional>
#include <string>
#include <vector>
diff --git a/llvm/utils/TableGen/InstrInfoEmitter.cpp b/llvm/utils/TableGen/InstrInfoEmitter.cpp
index ee3cd8c..32994c1 100644
--- a/llvm/utils/TableGen/InstrInfoEmitter.cpp
+++ b/llvm/utils/TableGen/InstrInfoEmitter.cpp
@@ -1103,7 +1103,8 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
Twine ClassName = TargetName + "GenInstrInfo";
OS << "struct " << ClassName << " : public TargetInstrInfo {\n"
<< " explicit " << ClassName
- << "(const TargetSubtargetInfo &STI, unsigned CFSetupOpcode = ~0u, "
+ << "(const TargetSubtargetInfo &STI, const TargetRegisterInfo &TRI, "
+ "unsigned CFSetupOpcode = ~0u, "
"unsigned CFDestroyOpcode = ~0u, "
"unsigned CatchRetOpcode = ~0u, unsigned ReturnOpcode = ~0u);\n"
<< " ~" << ClassName << "() override = default;\n"
@@ -1157,9 +1158,11 @@ void InstrInfoEmitter::run(raw_ostream &OS) {
<< TargetName << "InstrComplexDeprecationInfos[];\n";
Twine ClassName = TargetName + "GenInstrInfo";
OS << ClassName << "::" << ClassName
- << "(const TargetSubtargetInfo &STI, unsigned CFSetupOpcode, unsigned "
+ << "(const TargetSubtargetInfo &STI, const TargetRegisterInfo &TRI, "
+ "unsigned CFSetupOpcode, unsigned "
"CFDestroyOpcode, unsigned CatchRetOpcode, unsigned ReturnOpcode)\n"
- << " : TargetInstrInfo(CFSetupOpcode, CFDestroyOpcode, CatchRetOpcode, "
+ << " : TargetInstrInfo(TRI, CFSetupOpcode, CFDestroyOpcode, "
+ "CatchRetOpcode, "
"ReturnOpcode";
if (NumClassesByHwMode != 0)
OS << ", " << TargetName
diff --git a/llvm/utils/TableGen/TableGenBackends.h b/llvm/utils/TableGen/TableGenBackends.h
index 6e13864..261e171 100644
--- a/llvm/utils/TableGen/TableGenBackends.h
+++ b/llvm/utils/TableGen/TableGenBackends.h
@@ -15,8 +15,6 @@
#ifndef LLVM_UTILS_TABLEGEN_TABLEGENBACKENDS_H
#define LLVM_UTILS_TABLEGEN_TABLEGENBACKENDS_H
-#include <string>
-
// A TableGen backend is a function that looks like
//
// EmitFoo(RecordKeeper &RK, raw_ostream &OS /*, anything else you need */ )
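The convention described in that comment, sketched as a concrete backend (the
name EmitFoo and the TableGen class it queries are hypothetical, illustration
only):

  // A minimal backend following the shape documented above: walk every
  // record derived from a (hypothetical) TableGen class Foo, print names.
  #include "llvm/Support/raw_ostream.h"
  #include "llvm/TableGen/Record.h"
  using namespace llvm;

  static void EmitFoo(const RecordKeeper &RK, raw_ostream &OS) {
    for (const Record *R : RK.getAllDerivedDefinitions("Foo"))
      OS << R->getName() << "\n";
  }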
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
index b01cfb9..5c5958a 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/bugprone/BUILD.gn
@@ -38,6 +38,7 @@ static_library("bugprone") {
"DynamicStaticInitializersCheck.cpp",
"EasilySwappableParametersCheck.cpp",
"EmptyCatchCheck.cpp",
+ "ExceptionCopyConstructorThrowsCheck.cpp",
"ExceptionEscapeCheck.cpp",
"FloatLoopCounterCheck.cpp",
"FoldInitTypeCheck.cpp",
@@ -72,6 +73,7 @@ static_library("bugprone") {
"ParentVirtualCallCheck.cpp",
"PointerArithmeticOnPolymorphicObjectCheck.cpp",
"PosixReturnCheck.cpp",
+ "RandomGeneratorSeedCheck.cpp",
"RawMemoryCallOnNonTrivialTypeCheck.cpp",
"RedundantBranchConditionCheck.cpp",
"ReservedIdentifierCheck.cpp",
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn
index 18708f6..d373ac5 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/cert/BUILD.gn
@@ -17,7 +17,5 @@ static_library("cert") {
sources = [
"CERTTidyModule.cpp",
"LimitedRandomnessCheck.cpp",
- "ProperlySeededRandomGeneratorCheck.cpp",
- "ThrownExceptionTypeCheck.cpp",
]
}
diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
index 27bd2ce..e377b38 100644
--- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
@@ -402,6 +402,8 @@ if (current_toolchain == default_toolchain) {
"__configuration/abi.h",
"__configuration/availability.h",
"__configuration/compiler.h",
+ "__configuration/experimental.h",
+ "__configuration/hardening.h",
"__configuration/language.h",
"__configuration/platform.h",
"__coroutine/coroutine_handle.h",
@@ -1432,7 +1434,6 @@ if (current_toolchain == default_toolchain) {
"__tuple/tuple_like.h",
"__tuple/tuple_like_no_subrange.h",
"__tuple/tuple_size.h",
- "__tuple/tuple_types.h",
"__type_traits/add_cv_quals.h",
"__type_traits/add_pointer.h",
"__type_traits/add_reference.h",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
index d6c75b9..51555f2 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Analysis/BUILD.gn
@@ -68,7 +68,6 @@ static_library("Analysis") {
"InlineAdvisor.cpp",
"InlineCost.cpp",
"InlineOrder.cpp",
- "InlineSizeEstimatorAnalysis.cpp",
"InstCount.cpp",
"InstructionPrecedenceTracking.cpp",
"InstructionSimplify.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn
index 22aa0b6..8037c8d 100644
--- a/llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/IR/BUILD.gn
@@ -82,6 +82,7 @@ static_library("IR") {
"SafepointIRVerifier.cpp",
"Statepoint.cpp",
"StructuralHash.cpp",
+ "SystemLibraries.cpp",
"Type.cpp",
"TypeFinder.cpp",
"TypedPointerType.cpp",