Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/addrspacecast.mir | 35
-rw-r--r--  llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll | 14
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll (renamed from llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll) | 172
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-ext.ll | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll | 60
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll | 120
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll | 473
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll | 1096
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll | 1219
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll | 697
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll | 1194
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll | 610
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll | 1168
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll | 1062
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll | 757
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll | 652
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll | 1486
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16-math.ll | 52
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16.ll | 1139
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-scratch.ll | 31
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll | 41
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll | 151
-rw-r--r--  llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll | 5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir | 608
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir | 524
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir | 1044
-rw-r--r--  llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll | 93
-rw-r--r--  llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll | 1011
-rw-r--r--  llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/literal64.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll | 705
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll | 99
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll | 1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll | 1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll | 705
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll | 218
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll | 128
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll | 143
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll | 35
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll | 23
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll | 868
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll | 1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll | 111
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll | 868
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll | 104
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll | 110
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll | 420
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll | 420
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll | 27
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll | 108
-rw-r--r--  llvm/test/CodeGen/AMDGPU/packed-fp32.ll | 917
-rw-r--r--  llvm/test/CodeGen/AMDGPU/readcyclecounter.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll | 273
-rw-r--r--  llvm/test/CodeGen/AMDGPU/saddsat.ll | 52
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ssubsat.ll | 378
-rw-r--r--  llvm/test/CodeGen/AVR/cmp.ll | 15
-rw-r--r--  llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll | 35
-rw-r--r--  llvm/test/CodeGen/DirectX/imad.ll | 102
-rw-r--r--  llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll | 33
-rw-r--r--  llvm/test/CodeGen/DirectX/umad.ll | 102
-rw-r--r--  llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll | 80
-rw-r--r--  llvm/test/CodeGen/NVPTX/prefetch.ll | 43
-rw-r--r--  llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll | 1501
-rw-r--r--  llvm/test/CodeGen/PowerPC/aix-nest-param.ll | 11
-rw-r--r--  llvm/test/CodeGen/PowerPC/aix-trampoline.ll | 22
-rw-r--r--  llvm/test/CodeGen/PowerPC/memintr32.ll | 2
-rw-r--r--  llvm/test/CodeGen/PowerPC/memintr64.ll | 2
-rw-r--r--  llvm/test/CodeGen/RISCV/features-info.ll | 10
-rw-r--r--  llvm/test/CodeGen/RISCV/half-convert.ll | 108
-rw-r--r--  llvm/test/CodeGen/RISCV/macro-fusions.mir | 1376
-rw-r--r--  llvm/test/CodeGen/RISCV/misched-load-clustering.ll | 47
-rw-r--r--  llvm/test/CodeGen/RISCV/misched-mem-clustering.mir | 6
-rw-r--r--  llvm/test/CodeGen/RISCV/misched-store-clustering.ll | 83
-rw-r--r--  llvm/test/CodeGen/RISCV/rv32zbkb.ll | 71
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-half-convert.ll | 21
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64zbkb.ll | 214
-rw-r--r--  llvm/test/CodeGen/RISCV/unaligned-load-store.ll | 20
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll | 75
-rw-r--r--  llvm/test/CodeGen/WebAssembly/ref-test-func.ll | 30
-rw-r--r--  llvm/test/DebugInfo/X86/DW_AT_alloc_type.ll | 34
-rw-r--r--  llvm/test/Instrumentation/TypeSanitizer/alloca.ll | 53
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_ds.s | 1911
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_features.s | 32
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_operands.s | 25
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s | 4
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s | 14
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s | 2304
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s | 165
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop2_err.s | 13
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3cx.s | 3413
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp16.s | 14
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp8.s | 26
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s | 74
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_asm_vsample_err.s | 175
-rw-r--r--  llvm/test/MC/AMDGPU/gfx1250_err.s | 30
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt | 1104
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt | 12
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt | 3
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt | 2133
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3cx.txt | 3413
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp16.txt | 10
-rw-r--r--  llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp8.txt | 19
-rw-r--r--  llvm/test/MC/Disassembler/RISCV/riscv-mapping-symbols.s | 20
-rw-r--r--  llvm/test/MC/ELF/many-instructions.s | 3
-rw-r--r--  llvm/test/MC/RISCV/large-instructions.s | 29
-rw-r--r--  llvm/test/MC/RISCV/large-instructions.test | 60
-rw-r--r--  llvm/test/MC/RISCV/nop-slide.s | 13
-rw-r--r--  llvm/test/MC/RISCV/rvv/vsetvl-invalid.s | 24
-rw-r--r--  llvm/test/Transforms/GVN/PRE/phi-translate-2.ll | 198
-rw-r--r--  llvm/test/Transforms/GVN/PRE/phi-translate-add.ll | 45
-rw-r--r--  llvm/test/Transforms/GVN/PRE/phi-translate.ll | 87
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll | 61
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-basic-add.ll | 52
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-jt-add.ll | 30
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll | 187
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-load-guards.ll | 139
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll | 112
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-load.ll | 1089
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll | 12
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll | 25
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-poison-add.ll | 83
-rw-r--r--  llvm/test/Transforms/GVN/PRE/pre-single-pred.ll | 90
-rw-r--r--  llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll | 53
-rw-r--r--  llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/dot.ll | 56
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll | 12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll | 41
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll | 12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll | 30
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll | 70
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll | 125
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll | 24
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll | 36
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll | 50
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll | 16
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll | 104
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll | 43
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll | 14
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll | 32
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll | 22
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll | 56
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll | 56
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll | 10
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll | 14
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/cost-model.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/optsize.ll | 12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/pr81872.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/dead_instructions.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll | 224
-rw-r--r--  llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll | 42
-rw-r--r--  llvm/test/Transforms/LoopVectorize/intrinsic.ll | 201
-rw-r--r--  llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/loop-form.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/optsize.ll | 7
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pointer-induction.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll | 12
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/scalable-predication.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll | 16
-rw-r--r--  llvm/test/Transforms/LoopVectorize/select-reduction.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll | 24
-rw-r--r--  llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll | 18
-rw-r--r--  llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/uniform-blend.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll | 12
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll | 34
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll | 279
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll | 280
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/reused-scalar-repeated-in-node.ll | 145
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll | 57
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll | 741
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/RISCV/vec3-base.ll | 148
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/dot-product.ll | 120
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll | 161
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll | 137
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll | 70
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll | 22
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll | 77
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/extracts-with-undefs.ll | 77
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/insertelement-postpone.ll | 57
-rw-r--r--  llvm/test/Transforms/Scalarizer/intrinsics.ll | 47
-rw-r--r--  llvm/test/tools/llvm-objdump/MachO/bad-trie.test | 6
233 files changed, 39155 insertions, 8636 deletions
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/addrspacecast.mir b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/addrspacecast.mir
new file mode 100644
index 0000000..612f7b7
--- /dev/null
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/MIR/addrspacecast.mir
@@ -0,0 +1,35 @@
+# NOTE: This file is a generic MIR translation of the llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll test file.
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=print-machine-uniformity -filetype=null %s 2>&1 | FileCheck %s --check-prefix=UNI
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -run-pass=print-machine-uniformity -filetype=null %s 2>&1 | FileCheck %s --check-prefix=DIV
+
+# UNI: ALL VALUES UNIFORM
+# DIV: DIVERGENT: %3: %3:_(p0) = G_ADDRSPACE_CAST %2:_(p5)
+# DIV: DIVERGENT: %4: %4:_(p0) = G_INTRINSIC intrinsic(@llvm.amdgcn.addrspacecast.nonnull), %2:_(p5)
+
+--- |
+ define void @foo() {
+ %alloca = alloca i32, align 4, addrspace(5)
+ %cast = addrspacecast ptr addrspace(5) %alloca to ptr
+ store i32 1, ptr %cast, align 4
+ %cast.1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %alloca)
+ store i32 2, ptr %cast.1, align 4
+ ret void
+ }
+...
+---
+name: foo
+stack:
+ - { id: 0, name: alloca, type: default, offset: 0, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+body: |
+ bb.1 (%ir-block.0):
+ %10:_(s32) = G_CONSTANT i32 1
+ %12:_(s32) = G_CONSTANT i32 2
+ %8:_(p5) = G_FRAME_INDEX %stack.0.alloca
+ %9:_(p0) = G_ADDRSPACE_CAST %8(p5)
+ G_STORE %10(s32), %9(p0) :: (store (s32) into %ir.cast)
+ %11:_(p0) = G_INTRINSIC intrinsic(@llvm.amdgcn.addrspacecast.nonnull), %8(p5)
+ G_STORE %12(s32), %11(p0) :: (store (s32) into %ir.cast.1)
+ SI_RETURN
+...
diff --git a/llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll
new file mode 100644
index 0000000..e680844
--- /dev/null
+++ b/llvm/test/Analysis/UniformityAnalysis/AMDGPU/addrspacecast.ll
@@ -0,0 +1,14 @@
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes='print<uniformity>' -disable-output %s 2>&1 | FileCheck %s --check-prefix=UNI
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 -passes='print<uniformity>' -disable-output %s 2>&1 | FileCheck %s --check-prefix=DIV
+
+; UNI: ALL VALUES UNIFORM
+; DIV: DIVERGENT: %cast = addrspacecast ptr addrspace(5) %alloca to ptr
+; DIV: DIVERGENT: %cast.1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %alloca)
+define void @foo() {
+ %alloca = alloca i32, align 4, addrspace(5)
+ %cast = addrspacecast ptr addrspace(5) %alloca to ptr
+ store i32 1, ptr %cast
+ %cast.1 = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p5(ptr addrspace(5) %alloca)
+ store i32 2, ptr %cast.1
+ ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll
index 113eb14..4db9db9 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-logic-bitmask-immediate.ll
@@ -370,3 +370,175 @@ entry:
%r = select i1 %c, i64 %a, i64 %ands
ret i64 %r
}
+
+; Test EOR.
+define i32 @test1_eor(i32 %a) {
+; CHECK-LABEL: test1_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: eor w8, w0, #0x400
+; CHECK-NEXT: eor w0, w8, #0x200000
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i32 %a, 2098176
+ ret i32 %eor
+}
+
+; This constant should not be split because it can be handled by one mov.
+define i32 @test2_eor(i32 %a) {
+; CHECK-LABEL: test2_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #135 // =0x87
+; CHECK-NEXT: eor w0, w0, w8
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i32 %a, 135
+ ret i32 %eor
+}
+
+; This constant should not be split because the split immediate is not a
+; valid bitmask immediate.
+define i32 @test3_eor(i32 %a) {
+; CHECK-LABEL: test3_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1024 // =0x400
+; CHECK-NEXT: movk w8, #33, lsl #16
+; CHECK-NEXT: eor w0, w0, w8
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i32 %a, 2163712
+ ret i32 %eor
+}
+
+define i64 @test4_eor(i64 %a) {
+; CHECK-LABEL: test4_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: eor x8, x0, #0x400
+; CHECK-NEXT: eor x0, x8, #0x200000
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i64 %a, 2098176
+ ret i64 %eor
+}
+
+define i64 @test5_eor(i64 %a) {
+; CHECK-LABEL: test5_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: eor x8, x0, #0x4000
+; CHECK-NEXT: eor x0, x8, #0x200000000
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i64 %a, 8589950976
+ ret i64 %eor
+}
+
+; This constant should not be split because it can be handled by one mov.
+define i64 @test6_eor(i64 %a) {
+; CHECK-LABEL: test6_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #135 // =0x87
+; CHECK-NEXT: eor x0, x0, x8
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i64 %a, 135
+ ret i64 %eor
+}
+
+; This constant should not be split because the split immediate is not a
+; valid bitmask immediate.
+define i64 @test7_eor(i64 %a) {
+; CHECK-LABEL: test7_eor:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1024 // =0x400
+; CHECK-NEXT: movk w8, #33, lsl #16
+; CHECK-NEXT: eor x0, x0, x8
+; CHECK-NEXT: ret
+entry:
+ %eor = xor i64 %a, 2163712
+ ret i64 %eor
+}
+
+; Test ORR.
+define i32 @test1_orr(i32 %a) {
+; CHECK-LABEL: test1_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: orr w8, w0, #0x400
+; CHECK-NEXT: orr w0, w8, #0x200000
+; CHECK-NEXT: ret
+entry:
+ %orr = or i32 %a, 2098176
+ ret i32 %orr
+}
+
+; This constant should not be split because it can be handled by one mov.
+define i32 @test2_orr(i32 %a) {
+; CHECK-LABEL: test2_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #135 // =0x87
+; CHECK-NEXT: orr w0, w0, w8
+; CHECK-NEXT: ret
+entry:
+ %orr = or i32 %a, 135
+ ret i32 %orr
+}
+
+; This constant should not be split because the split immediate is not a
+; valid bitmask immediate.
+define i32 @test3_orr(i32 %a) {
+; CHECK-LABEL: test3_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1024 // =0x400
+; CHECK-NEXT: movk w8, #33, lsl #16
+; CHECK-NEXT: orr w0, w0, w8
+; CHECK-NEXT: ret
+entry:
+ %orr = or i32 %a, 2163712
+ ret i32 %orr
+}
+
+define i64 @test4_orr(i64 %a) {
+; CHECK-LABEL: test4_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: orr x8, x0, #0x400
+; CHECK-NEXT: orr x0, x8, #0x200000
+; CHECK-NEXT: ret
+entry:
+ %orr = or i64 %a, 2098176
+ ret i64 %orr
+}
+
+define i64 @test5_orr(i64 %a) {
+; CHECK-LABEL: test5_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: orr x8, x0, #0x4000
+; CHECK-NEXT: orr x0, x8, #0x200000000
+; CHECK-NEXT: ret
+entry:
+ %orr = or i64 %a, 8589950976
+ ret i64 %orr
+}
+
+; This constant should not be split because it can be handled by one mov.
+define i64 @test6_orr(i64 %a) {
+; CHECK-LABEL: test6_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #135 // =0x87
+; CHECK-NEXT: orr x0, x0, x8
+; CHECK-NEXT: ret
+entry:
+ %orr = or i64 %a, 135
+ ret i64 %orr
+}
+
+; This constant should not be split because the split immediate is not a
+; valid bitmask immediate.
+define i64 @test7_orr(i64 %a) {
+; CHECK-LABEL: test7_orr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #1024 // =0x400
+; CHECK-NEXT: movk w8, #33, lsl #16
+; CHECK-NEXT: orr x0, x0, x8
+; CHECK-NEXT: ret
+entry:
+ %orr = or i64 %a, 2163712
+ ret i64 %orr
+}
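Aside on the constants exercised above (a hedged illustration, not LLVM's in-tree code): AArch64 logical instructions encode an immediate as a repeated 2/4/8/16/32-bit element, where the element is a rotation of a contiguous run of ones; the split performed by these tests only fires when both halves are encodable. The brute-force checker below (helper name `is_logical_imm32` is mine, for illustration) reproduces that rule: 0x400 and 0x200000 are encodable, so 0x200400 (2098176) splits into two EOR/ORR immediates, while 0x210000 and 135 are not, so 0x210400 (2163712) falls back to mov+movk and 135 to a single mov.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A 32-bit AArch64 logical immediate is a repetition of a 2/4/8/16/32-bit
   element, where the element is a rotation of a contiguous run of ones.
   All-zeros and all-ones are not encodable. */
static bool is_logical_imm32(uint32_t v) {
  if (v == 0 || v == 0xFFFFFFFFu)
    return false;
  for (unsigned size = 2; size <= 32; size <<= 1) {
    uint32_t mask = (size == 32) ? 0xFFFFFFFFu : ((1u << size) - 1);
    uint32_t elem = v & mask;
    /* the element must repeat across all 32 bits */
    bool repeats = true;
    for (unsigned i = size; i < 32; i += size)
      if (((v >> i) & mask) != elem)
        repeats = false;
    if (!repeats)
      continue;
    /* some rotation of the element must be of the form (1 << k) - 1 */
    for (unsigned r = 0; r < size; ++r) {
      uint32_t rot = r ? (((elem >> r) | (elem << (size - r))) & mask) : elem;
      if (rot != 0 && (rot & (rot + 1)) == 0)
        return true;
    }
  }
  return false;
}

int main(void) {
  uint32_t vals[] = {0x400, 0x200000, 0x210000, 135, 0x200400, 0x210400};
  for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); ++i)
    printf("0x%08x -> %s\n", (unsigned)vals[i],
           is_logical_imm32(vals[i]) ? "encodable" : "not encodable");
  return 0;
}
```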
diff --git a/llvm/test/CodeGen/AArch64/arm64-ext.ll b/llvm/test/CodeGen/AArch64/arm64-ext.ll
index 8bf2b82..c367057 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ext.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ext.ll
@@ -139,9 +139,8 @@ define <2 x ptr> @test_v2p0(<2 x ptr> %a, <2 x ptr> %b) {
define <16 x i8> @reverse_vector_s8x16b(<16 x i8> noundef %x) {
; CHECK-SD-LABEL: reverse_vector_s8x16b:
; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: rev64 v1.16b, v0.16b
-; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: rev64 v0.16b, v0.16b
+; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: reverse_vector_s8x16b:
@@ -161,9 +160,8 @@ entry:
define <8 x i16> @reverse_vector_s16x8b(<8 x i16> noundef %x) {
; CHECK-SD-LABEL: reverse_vector_s16x8b:
; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: rev64 v1.8h, v0.8h
-; CHECK-SD-NEXT: ext v0.16b, v1.16b, v1.16b, #8
-; CHECK-SD-NEXT: mov v0.d[1], v1.d[0]
+; CHECK-SD-NEXT: rev64 v0.8h, v0.8h
+; CHECK-SD-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; CHECK-SD-NEXT: ret
;
; CHECK-GI-LABEL: reverse_vector_s16x8b:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
index a066b15..e6a8bac 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
@@ -1917,8 +1917,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX9-NEXT: s_mov_b32 s0, 0
; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_movk_i32 s0, 0x3e80
; GFX9-NEXT: v_mov_b32_e32 v0, 15
-; GFX9-NEXT: s_movk_i32 s0, 0x3e84
+; GFX9-NEXT: s_add_i32 s0, s0, 4
; GFX9-NEXT: scratch_store_dword off, v0, s0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: scratch_load_dword v0, off, s0 glc
@@ -1933,7 +1934,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9
; GFX10-NEXT: v_mov_b32_e32 v0, 13
; GFX10-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-NEXT: s_movk_i32 s0, 0x3e84
+; GFX10-NEXT: s_movk_i32 s0, 0x3e80
+; GFX10-NEXT: s_add_i32 s0, s0, 4
; GFX10-NEXT: scratch_store_dword off, v0, off offset:4
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: scratch_store_dword off, v1, s0
@@ -1945,10 +1947,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX942-LABEL: store_load_large_imm_offset_kernel:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: v_mov_b32_e32 v0, 13
+; GFX942-NEXT: s_movk_i32 s0, 0x3e80
; GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v0, 15
-; GFX942-NEXT: s_movk_i32 s0, 0x3e84
+; GFX942-NEXT: s_add_i32 s0, s0, 4
; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1
@@ -1958,7 +1961,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX11-LABEL: store_load_large_imm_offset_kernel:
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
-; GFX11-NEXT: s_movk_i32 s0, 0x3e84
+; GFX11-NEXT: s_movk_i32 s0, 0x3e80
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_add_i32 s0, s0, 4
; GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc
@@ -1986,8 +1991,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; UNALIGNED_GFX9-NEXT: s_mov_b32 s0, 0
; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
+; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15
-; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e84
+; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s0, 4
; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc
@@ -2002,7 +2008,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; UNALIGNED_GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15
-; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e84
+; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80
+; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s0, 4
; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, off offset:4
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0
@@ -2014,10 +2021,11 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_kernel:
; UNALIGNED_GFX942: ; %bb.0: ; %bb
; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13
+; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, off offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15
-; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e84
+; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s0, 4
; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1
@@ -2027,7 +2035,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; UNALIGNED_GFX11-LABEL: store_load_large_imm_offset_kernel:
; UNALIGNED_GFX11: ; %bb.0: ; %bb
; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
-; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e84
+; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80
+; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s0, 4
; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, off offset:4 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc
@@ -2061,11 +2071,13 @@ define void @store_load_large_imm_offset_foo() {
; GFX9-LABEL: store_load_large_imm_offset_foo:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s0, 0x3e80
; GFX9-NEXT: v_mov_b32_e32 v0, 13
+; GFX9-NEXT: s_add_i32 s1, s32, s0
; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, 15
-; GFX9-NEXT: s_add_i32 s0, s32, 0x3e84
+; GFX9-NEXT: s_add_i32 s0, s1, 4
; GFX9-NEXT: scratch_store_dword off, v0, s0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: scratch_load_dword v0, off, s0 glc
@@ -2076,8 +2088,10 @@ define void @store_load_large_imm_offset_foo() {
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v0, 13
+; GFX10-NEXT: s_movk_i32 s0, 0x3e80
; GFX10-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-NEXT: s_add_i32 s0, s32, 0x3e84
+; GFX10-NEXT: s_add_i32 s1, s32, s0
+; GFX10-NEXT: s_add_i32 s0, s1, 4
; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: scratch_store_dword off, v1, s0
@@ -2089,11 +2103,13 @@ define void @store_load_large_imm_offset_foo() {
; GFX942-LABEL: store_load_large_imm_offset_foo:
; GFX942: ; %bb.0: ; %bb
; GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT: s_movk_i32 s0, 0x3e80
; GFX942-NEXT: v_mov_b32_e32 v0, 13
+; GFX942-NEXT: s_add_i32 s1, s32, s0
; GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: v_mov_b32_e32 v0, 15
-; GFX942-NEXT: s_add_i32 s0, s32, 0x3e84
+; GFX942-NEXT: s_add_i32 s0, s1, 4
; GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1
@@ -2104,7 +2120,10 @@ define void @store_load_large_imm_offset_foo() {
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
-; GFX11-NEXT: s_add_i32 s0, s32, 0x3e84
+; GFX11-NEXT: s_movk_i32 s0, 0x3e80
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_add_i32 s1, s32, s0
+; GFX11-NEXT: s_add_i32 s0, s1, 4
; GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc
; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc
@@ -2133,11 +2152,13 @@ define void @store_load_large_imm_offset_foo() {
; UNALIGNED_GFX9-LABEL: store_load_large_imm_offset_foo:
; UNALIGNED_GFX9: ; %bb.0: ; %bb
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; UNALIGNED_GFX9-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 13
+; UNALIGNED_GFX9-NEXT: s_add_i32 s1, s32, s0
; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v0, 15
-; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s32, 0x3e84
+; UNALIGNED_GFX9-NEXT: s_add_i32 s0, s1, 4
; UNALIGNED_GFX9-NEXT: scratch_store_dword off, v0, s0
; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX9-NEXT: scratch_load_dword v0, off, s0 glc
@@ -2148,8 +2169,10 @@ define void @store_load_large_imm_offset_foo() {
; UNALIGNED_GFX10: ; %bb.0: ; %bb
; UNALIGNED_GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v0, 13
+; UNALIGNED_GFX10-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15
-; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s32, 0x3e84
+; UNALIGNED_GFX10-NEXT: s_add_i32 s1, s32, s0
+; UNALIGNED_GFX10-NEXT: s_add_i32 s0, s1, 4
; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4
; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; UNALIGNED_GFX10-NEXT: scratch_store_dword off, v1, s0
@@ -2161,11 +2184,13 @@ define void @store_load_large_imm_offset_foo() {
; UNALIGNED_GFX942-LABEL: store_load_large_imm_offset_foo:
; UNALIGNED_GFX942: ; %bb.0: ; %bb
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; UNALIGNED_GFX942-NEXT: s_movk_i32 s0, 0x3e80
; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 13
+; UNALIGNED_GFX942-NEXT: s_add_i32 s1, s32, s0
; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s32 offset:4 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: v_mov_b32_e32 v0, 15
-; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s32, 0x3e84
+; UNALIGNED_GFX942-NEXT: s_add_i32 s0, s1, 4
; UNALIGNED_GFX942-NEXT: scratch_store_dword off, v0, s0 sc0 sc1
; UNALIGNED_GFX942-NEXT: s_waitcnt vmcnt(0)
; UNALIGNED_GFX942-NEXT: scratch_load_dword v0, off, s0 sc0 sc1
@@ -2176,7 +2201,10 @@ define void @store_load_large_imm_offset_foo() {
; UNALIGNED_GFX11: ; %bb.0: ; %bb
; UNALIGNED_GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v0, 13 :: v_dual_mov_b32 v1, 15
-; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s32, 0x3e84
+; UNALIGNED_GFX11-NEXT: s_movk_i32 s0, 0x3e80
+; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; UNALIGNED_GFX11-NEXT: s_add_i32 s1, s32, s0
+; UNALIGNED_GFX11-NEXT: s_add_i32 s0, s1, 4
; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v0, s32 offset:4 dlc
; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
; UNALIGNED_GFX11-NEXT: scratch_store_b32 off, v1, s0 dlc
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
index 2785b78..481a254 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fp64-atomics-gfx90a.ll
@@ -2243,36 +2243,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
;
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-NEXT: s_mov_b32 s1, exec_lo
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
-; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX1250-NEXT: s_cbranch_execz .LBB51_3
+; GFX1250-NEXT: s_cbranch_execz .LBB51_2
; GFX1250-NEXT: ; %bb.1:
-; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
-; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v4, s1
-; GFX1250-NEXT: ds_load_b64 v[2:3], v4
-; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
-; GFX1250-NEXT: .LBB51_2: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
+; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
-; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB51_2
-; GFX1250-NEXT: .LBB51_3:
+; GFX1250-NEXT: .LBB51_2:
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2322,36 +2308,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
;
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-NEXT: s_mov_b32 s1, exec_lo
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
-; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX1250-NEXT: s_cbranch_execz .LBB52_3
+; GFX1250-NEXT: s_cbranch_execz .LBB52_2
; GFX1250-NEXT: ; %bb.1:
-; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
-; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v4, s1
-; GFX1250-NEXT: ds_load_b64 v[2:3], v4
-; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
-; GFX1250-NEXT: .LBB52_2: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
-; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB52_2
-; GFX1250-NEXT: .LBB52_3:
+; GFX1250-NEXT: .LBB52_2:
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2401,36 +2373,22 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
;
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX1250: ; %bb.0: ; %main_body
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
; GFX1250-NEXT: s_mov_b32 s1, exec_lo
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s1, 0
-; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-NEXT: v_cmpx_eq_u32_e32 0, v0
-; GFX1250-NEXT: s_cbranch_execz .LBB53_3
+; GFX1250-NEXT: s_cbranch_execz .LBB53_2
; GFX1250-NEXT: ; %bb.1:
-; GFX1250-NEXT: s_bcnt1_i32_b32 s1, s1
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s1
-; GFX1250-NEXT: s_load_b32 s1, s[4:5], 0x24
+; GFX1250-NEXT: s_bcnt1_i32_b32 s0, s0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v4, s1
-; GFX1250-NEXT: ds_load_b64 v[2:3], v4
-; GFX1250-NEXT: v_mul_f64_e32 v[0:1], 4.0, v[0:1]
-; GFX1250-NEXT: .LBB53_2: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[6:7], v[2:3], v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[6:7], v4, v[6:7], v[2:3]
+; GFX1250-NEXT: v_dual_mul_f64 v[0:1], 4.0, v[0:1] :: v_dual_mov_b32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[2:3]
-; GFX1250-NEXT: v_mov_b64_e32 v[2:3], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB53_2
-; GFX1250-NEXT: .LBB53_3:
+; GFX1250-NEXT: .LBB53_2:
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2459,23 +2417,9 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v2, v0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5]
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB54_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
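For context on the deleted `.LBB51_2`/`.LBB52_2`/`.LBB53_2` blocks above: they are the generic compare-and-swap expansion of `atomicrmw fadd` on `double`, which gfx1250 can now lower to the native `ds_add_f64`/`ds_add_rtn_f64` instructions. A rough C11 analogue of the expanded loop (a sketch for illustration, not what the backend literally emits; the helper name `atomic_fadd_f64` is mine):

```c
#include <stdatomic.h>

double atomic_fadd_f64(_Atomic double *p, double v) {
  double old = atomic_load(p); /* corresponds to the initial ds_load_b64 */
  /* Retry until no other lane/thread modified *p between the load and the
     compare-exchange; on failure, old is reloaded with the current value.
     ds_add_rtn_f64 performs the whole read-modify-write in one instruction. */
  while (!atomic_compare_exchange_weak(p, &old, old + v))
    ;
  return old; /* atomicrmw yields the previous value */
}
```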
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll
index 62f8f89..79a9291 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.add.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12,GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12,GFX1250 %s
; Natural mapping
define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
@@ -99,26 +100,47 @@ define amdgpu_ps <2 x float> @raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vg
; GFX8-NEXT: $vgpr1 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN_RTN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to <2 x float>
ret <2 x float> %cast
@@ -142,22 +164,39 @@ define amdgpu_ps void @raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgp
; GFX8-NEXT: BUFFER_ATOMIC_ADD_X2_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_OFFEN [[REG_SEQUENCE]], [[COPY6]], [[REG_SEQUENCE1]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -217,58 +256,111 @@ define amdgpu_ps float @raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -328,57 +420,109 @@ define amdgpu_ps void @raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgp
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_OFFEN [[COPY7]], [[COPY8]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -400,21 +544,40 @@ define amdgpu_ps float @raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_OFFEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN [[COPY]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_OFFEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll
index 364ed62..9f1b7a6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.atomic.cmpswap.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefixes=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck --check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck --check-prefix=GFX1250 %s
; Natural mapping
@@ -24,24 +25,43 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_
; GFX8-NEXT: $vgpr0 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -66,22 +86,39 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__
; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -145,62 +182,119 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_
; GFX8-NEXT: $vgpr0 = COPY [[COPY15]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY15]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY15]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY15]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -263,60 +357,115 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN [[REG_SEQUENCE2]], [[COPY10]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
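;
; The GFX1200/GFX1250 blocks above check the waterfall loop the selector
; emits when the resource descriptor and soffset are divergent (VGPR):
; each descriptor half is read back with V_READFIRSTLANE_B32, compared
; against the live VGPRs with V_CMP_EQ_U64/U32, and the cmpswap is retried
; under S_AND_SAVEEXEC_B32 until every active lane has run with its own
; descriptor. A minimal IR reproducer for this shape (function name
; hypothetical; the intrinsic signature is the one used throughout this
; file) would be:
;
; define amdgpu_ps void @cmpswap_i32_divergent_rsrc(i32 inreg %val, i32 inreg %cmp, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) {
;   %unused = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
;   ret void
; }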
@@ -341,24 +490,46 @@ define amdgpu_ps float @raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_
; GFX8-NEXT: $vgpr0 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY6]], [[COPY8]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN [[REG_SEQUENCE1]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_OFFEN_RTN]].sub0
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%ret = call i32 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
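;
; Note the GFX1250 difference in the add4095 checks above: GFX1200 still
; folds the constant into the instruction's immediate offset field
; (offset 4095), while the GFX1250 pattern materializes it with
; S_MOV_B32 4095 plus V_ADD_U32_e64 on the voffset and issues the atomic
; with offset 0; the same split shows up in the i64 add4095 test later in
; this file.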
@@ -395,33 +566,61 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
ret double %cast
@@ -450,26 +649,47 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__
; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
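;
; For the i64 tests, %val and %cmp are packed as adjacent 64-bit halves of
; a single 128-bit vdata tuple feeding BUFFER_ATOMIC_CMPSWAP_X2; the only
; GFX1250 change checked here is the switch to the even-aligned
; vreg_64_align2/vreg_128_align2 register classes. A minimal sketch of the
; i64 form (hypothetical name; signature as used elsewhere in this file):
;
; define amdgpu_ps void @cmpswap_i64_sketch(i64 %val, i64 %cmp, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
;   %unused = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
;   ret void
; }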
@@ -542,71 +762,137 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0
- ; GFX12-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0
+ ; GFX1200-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub0
+ ; GFX1250-NEXT: [[COPY19:%[0-9]+]]:vgpr_32 = COPY [[COPY17]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY18]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY19]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
ret double %cast
@@ -673,64 +959,123 @@ define amdgpu_ps void @raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY10]], %subreg.sub0_sub1, [[COPY11]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN [[REG_SEQUENCE4]], [[COPY12]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
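;
; This noret variant combines the two patterns above: the 128-bit vdata
; tuple for the 64-bit cmpswap plus the readfirstlane waterfall loop for
; the divergent resource and soffset, with no _RTN value to copy out of
; the loop body. A sketch of the driving IR (hypothetical name; signature
; as elsewhere in this file):
;
; define amdgpu_ps void @cmpswap_i64_divergent_noret(i64 inreg %val, i64 inreg %cmp, <4 x i32> %rsrc, i32 inreg %voffset, i32 %soffset) {
;   %unused = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
;   ret void
; }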
@@ -764,33 +1109,64 @@ define amdgpu_ps double @raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[COPY8]], [[REG_SEQUENCE2]], [[COPY9]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub0
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY10]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY8]], [[COPY10]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN [[REG_SEQUENCE3]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE2]], [[COPY9]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_OFFEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%voffset = add i32 %voffset.base, 4095
%ret = call i64 @llvm.amdgcn.raw.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll
index 46ca43b..7003bb1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1250 %s
; FIXME: Test with SI when argument lowering not broken for f16

; Natural mapping
@@ -124,52 +125,99 @@ define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -226,55 +274,105 @@ define amdgpu_ps float @raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__vgpr_rsrc__vgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -509,23 +607,41 @@ define amdgpu_ps <2 x float> @raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sg
; GFX8-NEXT: $vgpr1 = COPY [[COPY7]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%val = call <2 x float> @llvm.amdgcn.raw.buffer.load.v2f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <2 x float> %val
}
@@ -551,25 +667,45 @@ define amdgpu_ps <3 x float> @raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sg
; GFX8-NEXT: $vgpr2 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
;
- ; GFX12-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr2 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ ; GFX1200-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr2 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub0
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_OFFEN]].sub2
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr2 = COPY [[COPY8]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%val = call <3 x float> @llvm.amdgcn.raw.buffer.load.v3f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <3 x float> %val
}
@@ -597,27 +733,49 @@ define amdgpu_ps <4 x float> @raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sg
; GFX8-NEXT: $vgpr3 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
- ; GFX12-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr2 = COPY [[COPY8]]
- ; GFX12-NEXT: $vgpr3 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ ; GFX1200-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr2 = COPY [[COPY8]]
+ ; GFX1200-NEXT: $vgpr3 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub0
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub2
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_OFFEN]].sub3
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr2 = COPY [[COPY8]]
+ ; GFX1250-NEXT: $vgpr3 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <4 x float> %val
}
@@ -715,23 +873,41 @@ define amdgpu_ps <4 x half> @raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgp
; GFX8-NEXT: $vgpr1 = COPY [[COPY7]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY6]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY7]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4f16__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub0
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_OFFEN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY6]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY7]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%val = call <4 x half> @llvm.amdgcn.raw.buffer.load.v4f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <4 x half> %val
}
@@ -929,52 +1105,99 @@ define amdgpu_ps half @raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffse
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f16__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call half @llvm.amdgcn.raw.buffer.load.f16(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret half %val
}
@@ -1028,52 +1251,99 @@ define amdgpu_ps float @raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffse
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i8__vgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i8 @llvm.amdgcn.raw.buffer.load.i8(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
%zext = zext i8 %val to i32
%cast = bitcast i32 %zext to float
@@ -1194,20 +1464,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 16, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 16
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -1229,20 +1517,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -1267,20 +1573,38 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4096, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE]], [[COPY5]], 4096, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset__voffset_add4096
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4096
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -1522,54 +1846,103 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
- ; GFX12-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
+ ; GFX1200-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_soffset_add5000
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
+ ; GFX1250-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 [[COPY5]], [[S_MOV_B32_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[S_ADD_I32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%soffset = add i32 %soffset.base, 5000
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -1627,52 +2000,102 @@ define amdgpu_ps float @raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffs
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_OFFEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f32__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add5000
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY4]], [[COPY6]], 0, implicit $exec
+ ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_OFFEN [[V_ADD_U32_e64_]], [[REG_SEQUENCE1]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_OFFEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 5000
%val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret float %val
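
Taken together, the offset-addend tests above pin down two selection rules. A constant added to the VGPR voffset is folded into the buffer instruction's immediate offset field in the GFX1200 checks (16, 4095, 4096, and 5000 all appear as immediates), while the GFX1250 checks instead materialize the constant and add it with V_ADD_U32, leaving the immediate at zero. A constant added to the SGPR soffset lowers to a single S_ADD_I32 feeding the soffset operand on both targets. A condensed sketch of the two IR shapes (hypothetical function names; same intrinsic as used throughout this file):

; Sketch: constant voffset addend. Folded to an immediate offset in the
; GFX1200 checks; selected as an explicit V_ADD_U32 in the GFX1250 checks.
define amdgpu_ps float @sketch_voffset_add(<4 x i32> inreg %rsrc, i32 %voffset.base, i32 inreg %soffset) {
  %voffset = add i32 %voffset.base, 16
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

; Sketch: constant soffset addend. Stays on the scalar path (S_ADD_I32)
; in both the GFX1200 and GFX1250 checks.
define amdgpu_ps float @sketch_soffset_add(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset.base) {
  %soffset = add i32 %soffset.base, 5000
  %val = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}

declare float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32>, i32, i32, i32 immarg)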
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll
index 3fbfb63..4784ac5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.load.tfe.ll
@@ -5,7 +5,8 @@
; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910
; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910
; RUN: llc -global-isel -mcpu=gfx1100 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX11
-; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12
+; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1200
+; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1250
define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
; GFX67-LABEL: name: raw_buffer_load_i8_tfe
@@ -110,27 +111,49 @@ define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i8_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i8, i32 } @llvm.amdgcn.raw.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { i8, i32 } %res, 0
store i8 %data, ptr addrspace(1) %data_addr
@@ -242,27 +265,49 @@ define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i16_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i16, i32 } @llvm.amdgcn.raw.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { i16, i32 } %res, 0
store i16 %data, ptr addrspace(1) %data_addr
@@ -374,27 +419,49 @@ define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_f16_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { half, i32 } @llvm.amdgcn.raw.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { half, i32 } %res, 0
store half %data, ptr addrspace(1) %data_addr
@@ -506,27 +573,49 @@ define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i32, i32 } @llvm.amdgcn.raw.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { i32, i32 } %res, 0
store i32 %data, ptr addrspace(1) %data_addr
@@ -646,29 +735,53 @@ define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <2 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x i32>, i32 } %res, 0
store <2 x i32> %data, ptr addrspace(1) %data_addr
@@ -788,29 +901,53 @@ define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <2 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x float>, i32 } %res, 0
store <2 x float> %data, ptr addrspace(1) %data_addr
@@ -977,30 +1114,55 @@ define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <3 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x i32>, i32 } %res, 0
store <3 x i32> %data, ptr addrspace(1) %data_addr
@@ -1167,30 +1329,55 @@ define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <3 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x float>, i32 } %res, 0
store <3 x float> %data, ptr addrspace(1) %data_addr
@@ -1318,31 +1505,57 @@ define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <4 x i32>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %res, 0
store <4 x i32> %data, ptr addrspace(1) %data_addr
@@ -1470,31 +1683,57 @@ define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub0
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub2
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub3
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_OFFSET]].sub4
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1, [[COPY10]], %subreg.sub2, [[COPY11]], %subreg.sub3
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <4 x float>, i32 } @llvm.amdgcn.raw.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x float>, i32 } %res, 0
store <4 x float> %data, ptr addrspace(1) %data_addr
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
index 63ca7be..c365d57 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.raw.buffer.store.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefixes=GFX12,GFX1250 %s
; FIXME: Test with SI when argument lowering not broken for f16
; Natural mapping
@@ -126,52 +127,99 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
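+ ; Note: with the rsrc in VGPRs, a waterfall loop is generated (bb.2-bb.3):
+ ; V_READFIRSTLANE_B32 produces a uniform copy of the descriptor,
+ ; V_CMP_EQ_U64 plus S_AND_SAVEEXEC_B32 select the lanes that match it, and
+ ; the loop repeats until every lane has performed its store. GFX1250 also
+ ; uses the even-aligned vreg_64_align2/vreg_128_align2 classes for the
+ ; VGPR tuples, where GFX1200 uses plain vreg_64/vreg_128.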
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -309,55 +357,105 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -618,22 +716,39 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORDX2_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f32
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -657,23 +772,41 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORDX3_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v3f32
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY7]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -698,24 +831,43 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORDX4_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f32
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY8]], [[REG_SEQUENCE]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -876,22 +1028,39 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORDX2_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -946,54 +1115,103 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v4f16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr6
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_OFFEN_exact [[REG_SEQUENCE1]], [[COPY6]], [[REG_SEQUENCE2]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -1080,20 +1298,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
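+ ; Note: GFX1200 folds the constant voffset addend into the instruction's
+ ; immediate offset field, while GFX1250 materializes it with V_ADD_U32_e64
+ ; and stores with an immediate offset of 0; the +4095 and +4096 variants
+ ; below follow the same pattern.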
%voffset.add = add i32 %voffset, 16
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1115,20 +1351,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 4095
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1153,20 +1407,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[V_ADD_CO_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_f32_voffset_add_4096
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 4096
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1256,20 +1528,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 16, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_16
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 16
call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1291,20 +1581,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 4095
call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1329,20 +1637,38 @@ define amdgpu_ps void @raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: BUFFER_STORE_DWORD_OFFEN_exact [[COPY4]], [[V_ADD_CO_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE]], [[COPY6]], 4096, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__sgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_v2f16_soffset_add_4096
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4096
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 4096
call void @llvm.amdgcn.raw.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1400,52 +1726,102 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[COPY5]], [[REG_SEQUENCE1]], [[COPY6]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__vgpr_voffset__sgpr_soffset_offset_add_5000
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 5000
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY10]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFEN_exact [[COPY4]], [[V_ADD_U32_e64_]], [[REG_SEQUENCE1]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_1]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%voffset.add = add i32 %voffset, 5000
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -1501,51 +1877,97 @@ define amdgpu_ps void @raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_store__vgpr_rsrc__vgpr_val__5000_voffset__sgpr_soffset_offset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY8]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY9]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_OFFSET_exact [[COPY4]], [[REG_SEQUENCE1]], [[COPY5]], 5000, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.raw.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 5000, i32 %soffset, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll
index 75d6c59..484639a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.add.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s

; Natural mapping
define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
@@ -22,23 +23,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -63,23 +82,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc_
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -109,28 +146,51 @@ define amdgpu_ps <2 x float> @struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc_
; GFX8-NEXT: $vgpr1 = COPY [[COPY10]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY10]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY10]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i64__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN_RTN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY10]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to <2 x float>
ret <2 x float> %cast
@@ -156,24 +216,43 @@ define amdgpu_ps void @struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__
; GFX8-NEXT: BUFFER_ATOMIC_ADD_X2_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i64_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_X2_VBUFFER_BOTHEN [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.add.i64(i64 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -236,61 +315,117 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -353,60 +488,115 @@ define amdgpu_ps void @struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32_noret__sgpr_val__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY13]], [[COPY11]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY14]], [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN [[COPY8]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -431,23 +621,41 @@ define amdgpu_ps float @struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_BOTHEN_RTN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_add_i32__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vgpr_32 = BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 3, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_ATOMIC_ADD_VBUFFER_BOTHEN_RTN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
%cast = bitcast i32 %ret to float
ret float %cast
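
Both run lines above feed the same IR through GlobalISel instruction selection; the GFX1250 checks differ from the GFX1200 ones mainly in requiring even-aligned VGPR tuples (vreg_64_align2, vreg_128_align2) for the multi-register operands. A minimal sketch of the kind of IR these checks are generated from, assuming a hypothetical function name (the intrinsic signature matches the calls in the tests above):

; Natural-mapping sketch: val/vindex/voffset in VGPRs, rsrc/soffset uniform (inreg).
; @example_struct_add is a hypothetical name, not part of the test file.
define amdgpu_ps float @example_struct_add(i32 %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  %old = call i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32 %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i32 %old to float
  ret float %cast
}
declare i32 @llvm.amdgcn.struct.buffer.atomic.add.i32(i32, <4 x i32>, i32, i32, i32, i32 immarg)
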
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll
index c9d1227..7dab257 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.atomic.cmpswap.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s
; Natural mapping
define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
@@ -25,26 +26,47 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sg
; GFX8-NEXT: $vgpr0 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -71,24 +93,43 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cm
; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_noret_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
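
When the resource descriptor lives in VGPRs, as in the tests that follow, selection cannot consume it directly as an SGPR operand; the checks show the waterfall-loop expansion, which readfirstlanes each descriptor half, compares it against the per-lane value, and executes the atomic once per uniform subset of lanes. A minimal IR sketch of the cmpswap form, assuming a hypothetical function name (the seven-operand signature matches the calls above; per the checks, only sub0 of the 64-bit result tuple carries the returned old value):

; Hypothetical example; selection packs %val and %cmp into one 64-bit register pair
; (vreg_64 on GFX1200, vreg_64_align2 on GFX1250), as the checks above show.
define amdgpu_ps float @example_struct_cmpswap(i32 %val, i32 %cmp, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  %old = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  %cast = bitcast i32 %old to float
  ret float %cast
}
declare i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32, i32, <4 x i32>, i32, i32, i32, i32 immarg)
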
@@ -155,65 +196,125 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vg
; GFX8-NEXT: $vgpr0 = COPY [[COPY17]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY17]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY17]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY17]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
ret float %cast
@@ -279,63 +380,121 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cm
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[COPY]]
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[COPY1]]
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[COPY6]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY7]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY15]], [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY8]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0, [[COPY12]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
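; NOTE (editorial, not emitted by the check generator): the bb.2-bb.4 structure
; verified above is the GlobalISel waterfall loop used whenever the resource
; descriptor or soffset is divergent: each trip reads the first active lane's
; values with V_READFIRSTLANE_B32, compares them against the per-lane inputs,
; narrows $exec to the matching lanes with S_AND_SAVEEXEC_B32, issues the
; atomic once for that uniform subset, and repeats via SI_WATERFALL_LOOP until
; every lane has been serviced.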
@@ -362,26 +521,50 @@ define amdgpu_ps float @struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sg
; GFX8-NEXT: $vgpr0 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i32__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY7]], [[COPY9]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_64_align2 = BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY8]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_ATOMIC_CMPSWAP_VBUFFER_BOTHEN_RTN]].sub0
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY10]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%ret = call i32 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i32(i32 %val, i32 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i32 %ret to float
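; NOTE (editorial): the GFX1200 and GFX1250 checks above diverge on the +4095
; voffset: GFX1200 folds it into the instruction's immediate offset field,
; while GFX1250 materializes it (S_MOV_B32 4095 + V_ADD_U32_e64) into the
; vaddr tuple instead, leaving the immediate offset at 0.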
@@ -420,35 +603,65 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__s
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
ret double %cast
@@ -479,28 +692,51 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cm
; GFX8-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_noret_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
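; NOTE (editorial): the only GFX1250 delta in the two i64 tests above is the
; register-class constraint: VGPR tuples use the even-aligned classes
; vreg_64_align2/vreg_128_align2 in place of GFX1200's vreg_64/vreg_128; the
; selected instructions themselves are unchanged.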
@@ -576,74 +812,143 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__v
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
- ; GFX12-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY19:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0
- ; GFX12-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY19:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0
+ ; GFX1200-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY19:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: [[COPY20:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub0
+ ; GFX1250-NEXT: [[COPY21:%[0-9]+]]:vgpr_32 = COPY [[COPY19]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_5:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY20]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_5]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_6:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY21]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_6]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
ret double %cast
@@ -713,67 +1018,129 @@ define amdgpu_ps void @struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cm
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
- ; GFX12-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
- ; GFX12-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64_noret__sgpr_val__sgpr_cmp__vgpr_rsrc__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY18:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE3]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY18]], [[COPY16]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY13]], %subreg.sub0, [[COPY14]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE5:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY11]], %subreg.sub0_sub1, [[COPY12]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN [[REG_SEQUENCE5]], [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -809,35 +1176,68 @@ define amdgpu_ps double @struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__s
; GFX8-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
;
- ; GFX12-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
- ; GFX12-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
- ; GFX12-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
- ; GFX12-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ; GFX1200-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1200-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 4095, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_64 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub0
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY11]].sub1
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY12]], implicit $exec
+ ; GFX1200-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1200-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_atomic_cmpswap_i64__vgpr_val__vgpr_cmp__sgpr_rsrc__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY9]], [[COPY11]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1
+ ; GFX1250-NEXT: [[REG_SEQUENCE4:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[REG_SEQUENCE]], %subreg.sub0_sub1, [[REG_SEQUENCE1]], %subreg.sub2_sub3
+ ; GFX1250-NEXT: [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN:%[0-9]+]]:vreg_128_align2 = BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[COPY10]], 0, 1, implicit $exec :: (volatile dereferenceable load store (s64), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vreg_64_align2 = COPY [[BUFFER_ATOMIC_CMPSWAP_X2_VBUFFER_BOTHEN_RTN]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub0
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vgpr_32 = COPY [[COPY12]].sub1
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY13]], implicit $exec
+ ; GFX1250-NEXT: $sgpr0 = COPY [[V_READFIRSTLANE_B32_]]
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: $sgpr1 = COPY [[V_READFIRSTLANE_B32_1]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
%voffset = add i32 %voffset.base, 4095
%ret = call i64 @llvm.amdgcn.struct.buffer.atomic.cmpswap.i64(i64 %val, i64 %cmp, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%cast = bitcast i64 %ret to double
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
index 9b5e46b3..dbef90f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s
; Natural mapping
define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
@@ -21,22 +22,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -63,25 +81,45 @@ define amdgpu_ps <2 x float> @struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__
; GFX8-NEXT: $vgpr1 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%val = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <2 x float> %val
}
@@ -110,27 +148,49 @@ define amdgpu_ps <3 x float> @struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__
; GFX8-NEXT: $vgpr2 = COPY [[COPY9]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
;
- ; GFX12-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]]
- ; GFX12-NEXT: $vgpr2 = COPY [[COPY9]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ ; GFX1200-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1200-NEXT: $vgpr2 = COPY [[COPY9]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_VBUFFER_BOTHEN]].sub2
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1250-NEXT: $vgpr2 = COPY [[COPY9]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
%val = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <3 x float> %val
}
@@ -161,29 +221,53 @@ define amdgpu_ps <4 x float> @struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__
; GFX8-NEXT: $vgpr3 = COPY [[COPY10]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
;
- ; GFX12-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]]
- ; GFX12-NEXT: $vgpr2 = COPY [[COPY9]]
- ; GFX12-NEXT: $vgpr3 = COPY [[COPY10]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ ; GFX1200-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1200-NEXT: $vgpr2 = COPY [[COPY9]]
+ ; GFX1200-NEXT: $vgpr3 = COPY [[COPY10]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub1
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub2
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_VBUFFER_BOTHEN]].sub3
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1250-NEXT: $vgpr2 = COPY [[COPY9]]
+ ; GFX1250-NEXT: $vgpr3 = COPY [[COPY10]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
%val = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <4 x float> %val
}
@@ -208,23 +292,41 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
@@ -248,22 +350,42 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 4095
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 [[COPY5]], [[COPY7]], 0, implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[V_ADD_U32_e64_]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%voffset = add i32 %voffset.base, 4095
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -287,22 +409,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 64, i32 0)
ret float %val
}
@@ -363,59 +502,113 @@ define amdgpu_ps float @struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY1]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY2]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY3]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY11]], [[COPY9]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY12]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
}
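The waterfall loop checked above is what instruction selection emits when the resource descriptor or soffset may be divergent: each trip readfirstlanes the VGPR operands, compares them against the per-lane values, runs the buffer load with exec masked to the matching lanes, and repeats until every lane is serviced. A minimal standalone caller that provokes this lowering (a sketch; the wrapper name is illustrative, not from this patch) keeps %rsrc and %soffset out of inreg so they arrive in VGPRs:

declare float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32>, i32, i32, i32, i32 immarg)

; %rsrc and %soffset are plain (non-inreg) arguments, so they may be divergent
; and force the readfirstlane waterfall seen in the checks above.
define amdgpu_ps float @waterfall_example(<4 x i32> %rsrc, i32 inreg %vindex, i32 inreg %voffset, i32 %soffset) {
  %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret float %val
}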
@@ -438,22 +631,39 @@ define amdgpu_ps float @struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%ext = zext i8 %val to i32
%cast = bitcast i32 %ext to float
@@ -478,22 +688,39 @@ define amdgpu_ps float @struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SBYTE_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%ext = sext i8 %val to i32
%cast = bitcast i32 %ext to float
@@ -519,23 +746,41 @@ define amdgpu_ps float @struct_buffer_load_i8_sext_wrong_width(<4 x i32> inreg %
; GFX8-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i8_sext_wrong_width
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec
- ; GFX12-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i8_sext_wrong_width
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec
+ ; GFX1200-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i8_sext_wrong_width
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_UBYTE_VBUFFER_BOTHEN]], 0, 4, implicit $exec
+ ; GFX1250-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%trunc = trunc i8 %val to i4
%ext = sext i4 %trunc to i32
@@ -561,22 +806,39 @@ define amdgpu_ps float @struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgp
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%ext = zext i16 %val to i32
%cast = bitcast i32 %ext to float
@@ -601,22 +863,39 @@ define amdgpu_ps float @struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgp
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_SSHORT_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%ext = sext i16 %val to i32
%cast = bitcast i32 %ext to float
@@ -642,23 +921,41 @@ define amdgpu_ps float @struct_buffer_load_i16_sext_wrong_width(<4 x i32> inreg
; GFX8-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_i16_sext_wrong_width
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec
- ; GFX12-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_i16_sext_wrong_width
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec
+ ; GFX1200-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_i16_sext_wrong_width
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]], 0, 8, implicit $exec
+ ; GFX1250-NEXT: $vgpr0 = COPY [[V_BFE_I32_e64_]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
%trunc = trunc i16 %val to i8
%ext = sext i8 %trunc to i32
@@ -685,22 +982,39 @@ define amdgpu_ps half @struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voff
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call half @llvm.amdgcn.struct.buffer.load.f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret half %val
}
@@ -724,22 +1038,39 @@ define amdgpu_ps <2 x half> @struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__v
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call <2 x half> @llvm.amdgcn.struct.buffer.load.v2f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <2 x half> %val
}
@@ -772,25 +1103,45 @@ define amdgpu_ps <4 x half> @struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__v
; GFX8-NEXT: $vgpr1 = COPY [[COPY8]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
;
- ; GFX12-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
- ; GFX12-NEXT: $vgpr0 = COPY [[COPY7]]
- ; GFX12-NEXT: $vgpr1 = COPY [[COPY8]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ; GFX1200-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ ; GFX1200-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1200-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_VBUFFER_BOTHEN]].sub1
+ ; GFX1250-NEXT: $vgpr0 = COPY [[COPY7]]
+ ; GFX1250-NEXT: $vgpr1 = COPY [[COPY8]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
%val = call <4 x half> @llvm.amdgcn.struct.buffer.load.v4f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <4 x half> %val
}
@@ -814,22 +1165,39 @@ define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_vof
; GFX8-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
; GFX8-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
;
- ; GFX12-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
- ; GFX12-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ; GFX1200-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1200-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_VBUFFER_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_VBUFFER_BOTHEN]]
+ ; GFX1250-NEXT: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1)
ret float %val
}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll
index 674fe1c..39cce20 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.tfe.ll
@@ -5,7 +5,8 @@
; RUN: llc -global-isel -mcpu=gfx900 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910
; RUN: llc -global-isel -mcpu=gfx1010 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefix=GFX910
; RUN: llc -global-isel -mcpu=gfx1100 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX11
-; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12
+; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1200
+; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX1250
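The shared GFX12 prefix is split here because gfx1250 selects the even-aligned VGPR tuple classes (vreg_64_align2, vreg_128_align2) where gfx1200 keeps vreg_64/vreg_128, so the two targets no longer produce identical MIR. Had most lines still matched, a common prefix could have been kept alongside the per-target ones, e.g. (a sketch, not what this patch does):

; RUN: llc -global-isel -mcpu=gfx1200 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12,GFX1200
; RUN: llc -global-isel -mcpu=gfx1250 -mattr=-real-true16 -mtriple=amdgcn-- -stop-after=instruction-select < %s | FileCheck %s -check-prefixes=GFX12,GFX1250

with the update script then emitting GFX12 checks for the lines common to both targets and GFX1200/GFX1250 checks only where they diverge.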
define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
; GFX67-LABEL: name: raw_buffer_load_i8_tfe
@@ -114,29 +115,53 @@ define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspa
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i8_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i8_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s8), addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_UBYTE_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_BYTE [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s8) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i8, i32 } %res, 0
store i8 %data, ptr addrspace(1) %data_addr
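The TFE forms return the loaded value plus an extra status dword, which is why the selected load writes a 64-bit tuple (vreg_64, or vreg_64_align2 on gfx1250) that is then split with sub0/sub1 copies. In IR the pair is modelled as a literal struct; a minimal sketch of the shape (declaration inferred from the call site above, wrapper name illustrative):

declare { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32>, i32, i32, i32, i32 immarg)

define amdgpu_ps void @tfe_shape_example(<4 x i32> inreg %rsrc, ptr addrspace(1) %out) {
  %res = call { i8, i32 } @llvm.amdgcn.struct.buffer.load.sl_i8i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
  ; sub0 of the hardware result: the loaded byte
  %data = extractvalue { i8, i32 } %res, 0
  ; sub1: the status dword written by the TFE load
  %status = extractvalue { i8, i32 } %res, 1
  store i8 %data, ptr addrspace(1) %out
  ret void
}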
@@ -252,29 +277,53 @@ define amdgpu_ps void @raw_buffer_load_i16_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i16_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i16_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i16, i32 } @llvm.amdgcn.struct.buffer.load.sl_i16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i16, i32 } %res, 0
store i16 %data, ptr addrspace(1) %data_addr
@@ -390,29 +439,53 @@ define amdgpu_ps void @raw_buffer_load_f16_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_f16_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_f16_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_USHORT_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_SHORT [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s16) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { half, i32 } @llvm.amdgcn.struct.buffer.load.sl_f16i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { half, i32 } %res, 0
store half %data, ptr addrspace(1) %data_addr
@@ -528,29 +601,53 @@ define amdgpu_ps void @raw_buffer_load_i32_tfe(<4 x i32> inreg %rsrc, ptr addrsp
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_64_align2 = BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORD_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE1]], [[COPY9]], 0, 0, implicit $exec :: (store (s32) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY10]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { i32, i32 } @llvm.amdgcn.struct.buffer.load.sl_i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { i32, i32 } %res, 0
store i32 %data, ptr addrspace(1) %data_addr
@@ -674,31 +771,57 @@ define amdgpu_ps void @raw_buffer_load_v2i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v2i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <2 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x i32>, i32 } %res, 0
store <2 x i32> %data, ptr addrspace(1) %data_addr
@@ -822,31 +945,57 @@ define amdgpu_ps void @raw_buffer_load_v2f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v2f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v2f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_96_align2 = BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<2 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY11]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <2 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v2f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <2 x float>, i32 } %res, 0
store <2 x float> %data, ptr addrspace(1) %data_addr
@@ -1018,32 +1167,59 @@ define amdgpu_ps void @raw_buffer_load_v3i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v3i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <3 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x i32>, i32 } %res, 0
store <3 x i32> %data, ptr addrspace(1) %data_addr
@@ -1215,32 +1391,59 @@ define amdgpu_ps void @raw_buffer_load_v3f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v3f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v3f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_128_align2 = BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX3 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<3 x s32>) into %ir.data_addr, align 16, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY12]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <3 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v3f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <3 x float>, i32 } %res, 0
store <3 x float> %data, ptr addrspace(1) %data_addr
@@ -1372,33 +1575,61 @@ define amdgpu_ps void @raw_buffer_load_v4i32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v4i32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4i32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <4 x i32>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4i32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x i32>, i32 } %res, 0
store <4 x i32> %data, ptr addrspace(1) %data_addr
@@ -1530,33 +1761,61 @@ define amdgpu_ps void @raw_buffer_load_v4f32_tfe(<4 x i32> inreg %rsrc, ptr addr
; GFX11-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
; GFX11-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: raw_buffer_load_v4f32_tfe
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
- ; GFX12-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
- ; GFX12-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
- ; GFX12-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1200-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: raw_buffer_load_v4f32_tfe
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; GFX1250-NEXT: [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN:%[0-9]+]]:vreg_160_align2 = BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN [[COPY8]], [[REG_SEQUENCE]], $sgpr_null, 0, 0, 0, implicit $exec :: (dereferenceable load (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub0
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub1
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub2
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub3
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_TFE_VBUFFER_IDXEN]].sub4
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY9]], %subreg.sub0, [[COPY10]], %subreg.sub1, [[COPY11]], %subreg.sub2, [[COPY12]], %subreg.sub3
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORDX4 [[REG_SEQUENCE1]], [[REG_SEQUENCE3]], 0, 0, implicit $exec :: (store (<4 x s32>) into %ir.data_addr, addrspace 1)
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD [[REG_SEQUENCE2]], [[COPY13]], 0, 0, implicit $exec :: (store (s32) into %ir.tfe_addr, addrspace 1)
+ ; GFX1250-NEXT: S_ENDPGM 0
%res = call { <4 x float>, i32 } @llvm.amdgcn.struct.buffer.load.sl_v4f32i32s(<4 x i32> %rsrc, i32 0, i32 0, i32 0, i32 0)
%data = extractvalue { <4 x float>, i32 } %res, 0
store <4 x float> %data, ptr addrspace(1) %data_addr
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll
index 8183d85..c9771b5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.store.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx810 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX8 %s
-; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 -stop-after=instruction-select -o - %s | FileCheck -check-prefix=GFX1250 %s
; Natural mapping
define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
@@ -21,22 +22,39 @@ define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex_
; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -61,24 +79,43 @@ define amdgpu_ps void @struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v2f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -104,25 +141,45 @@ define amdgpu_ps void @struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORDX3_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v3f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_96_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY4]], %subreg.sub1, [[COPY5]], %subreg.sub2, [[COPY6]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX3_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY9]], 0, 0, 0, implicit $exec :: (dereferenceable store (<3 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v3f32(<3 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -149,26 +206,47 @@ define amdgpu_ps void @struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORDX4_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v4f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:vgpr_32 = COPY $vgpr5
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY8]], %subreg.sub0, [[COPY9]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY10]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -233,64 +311,123 @@ define amdgpu_ps void @struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vinde
; GFX8-NEXT: bb.5:
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: successors: %bb.2(0x80000000)
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
- ; GFX12-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
- ; GFX12-NEXT: [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]]
- ; GFX12-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
- ; GFX12-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
- ; GFX12-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.2:
- ; GFX12-NEXT: successors: %bb.3(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
- ; GFX12-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
- ; GFX12-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
- ; GFX12-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
- ; GFX12-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
- ; GFX12-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
- ; GFX12-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
- ; GFX12-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.3:
- ; GFX12-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
- ; GFX12-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
- ; GFX12-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.4:
- ; GFX12-NEXT: successors: %bb.5(0x80000000)
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: bb.5:
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1200-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1200-NEXT: [[COPY11:%[0-9]+]]:vreg_128 = COPY [[REG_SEQUENCE]]
+ ; GFX1200-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1200-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1200-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.2:
+ ; GFX1200-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY14:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY15:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1200-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1200-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1200-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1200-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.3:
+ ; GFX1200-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1200-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1200-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.4:
+ ; GFX1200-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: bb.5:
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v4f32_vgpr_rsrc__sgpr_val__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: successors: %bb.2(0x80000000)
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $sgpr7, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1, [[COPY6]], %subreg.sub2, [[COPY7]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[COPY9:%[0-9]+]]:sreg_32 = COPY $sgpr7
+ ; GFX1250-NEXT: [[COPY10:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; GFX1250-NEXT: [[COPY11:%[0-9]+]]:vreg_128_align2 = COPY [[REG_SEQUENCE]]
+ ; GFX1250-NEXT: [[COPY12:%[0-9]+]]:vgpr_32 = COPY [[COPY8]]
+ ; GFX1250-NEXT: [[COPY13:%[0-9]+]]:vgpr_32 = COPY [[COPY9]]
+ ; GFX1250-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_MOV_B32 $exec_lo
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.2:
+ ; GFX1250-NEXT: successors: %bb.3(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY4]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY5]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY7]], implicit $exec
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY14:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY15:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]].sub2_sub3
+ ; GFX1250-NEXT: [[COPY16:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub0_sub1
+ ; GFX1250-NEXT: [[COPY17:%[0-9]+]]:sreg_64 = COPY [[REG_SEQUENCE2]].sub2_sub3
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY16]], [[COPY14]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U64_e64 [[COPY17]], [[COPY15]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[V_CMP_EQ_U64_e64_]], [[V_CMP_EQ_U64_e64_1]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_32_xm0_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY10]], implicit $exec
+ ; GFX1250-NEXT: [[S_AND_B32_1:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_B32 [[S_AND_B32_]], [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
+ ; GFX1250-NEXT: [[S_AND_SAVEEXEC_B32_:%[0-9]+]]:sreg_32_xm0_xexec = S_AND_SAVEEXEC_B32 killed [[S_AND_B32_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.3:
+ ; GFX1250-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY12]], %subreg.sub0, [[COPY13]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX4_VBUFFER_BOTHEN_exact [[COPY11]], [[REG_SEQUENCE3]], [[REG_SEQUENCE2]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s32>), align 1, addrspace 8)
+ ; GFX1250-NEXT: $exec_lo = S_XOR_B32_term $exec_lo, [[S_AND_SAVEEXEC_B32_]], implicit-def $scc
+ ; GFX1250-NEXT: SI_WATERFALL_LOOP %bb.2, implicit $exec
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.4:
+ ; GFX1250-NEXT: successors: %bb.5(0x80000000)
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: $exec_lo = S_MOV_B32_term [[S_MOV_B32_]]
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: bb.5:
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -313,22 +450,39 @@ define amdgpu_ps void @struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__
; GFX8-NEXT: BUFFER_STORE_BYTE_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_i8_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_BYTE_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s8), addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%val.trunc = trunc i32 %val to i8
call void @llvm.amdgcn.struct.buffer.store.i8(i8 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
@@ -352,22 +506,39 @@ define amdgpu_ps void @struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex_
; GFX8-NEXT: BUFFER_STORE_SHORT_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_i16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_SHORT_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (s16), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
%val.trunc = trunc i32 %val to i16
call void @llvm.amdgcn.struct.buffer.store.i16(i16 %val.trunc, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
@@ -391,22 +562,39 @@ define amdgpu_ps void @struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex_
; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_f32_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 1, 0, implicit $exec :: (dereferenceable store (s32), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.f32(float %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1)
ret void
}
@@ -429,22 +617,39 @@ define amdgpu_ps void @struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORD_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v2f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1, [[COPY3]], %subreg.sub2, [[COPY4]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY5]], %subreg.sub0, [[COPY6]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_VBUFFER_BOTHEN_exact [[COPY]], [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY7]], 0, 0, 0, implicit $exec :: (dereferenceable store (<2 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -475,24 +680,43 @@ define amdgpu_ps void @struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vinde
; GFX8-NEXT: BUFFER_STORE_DWORDX2_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
; GFX8-NEXT: S_ENDPGM 0
;
- ; GFX12-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
- ; GFX12: bb.1 (%ir-block.0):
- ; GFX12-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
- ; GFX12-NEXT: {{ $}}
- ; GFX12-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
- ; GFX12-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
- ; GFX12-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
- ; GFX12-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
- ; GFX12-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
- ; GFX12-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
- ; GFX12-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
- ; GFX12-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
- ; GFX12-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
- ; GFX12-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
- ; GFX12-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
- ; GFX12-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
- ; GFX12-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
- ; GFX12-NEXT: S_ENDPGM 0
+ ; GFX1200-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1200: bb.1 (%ir-block.0):
+ ; GFX1200-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1200-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1200-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1200-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1200-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1200-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1200-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1200-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1200-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1200-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1200-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1200-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1200-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1200-NEXT: S_ENDPGM 0
+ ;
+ ; GFX1250-LABEL: name: struct_buffer_store_v4f16_sgpr_rsrc__vgpr_val__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; GFX1250: bb.1 (%ir-block.0):
+ ; GFX1250-NEXT: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; GFX1250-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; GFX1250-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
+ ; GFX1250-NEXT: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; GFX1250-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; GFX1250-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; GFX1250-NEXT: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; GFX1250-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY3]], %subreg.sub1, [[COPY4]], %subreg.sub2, [[COPY5]], %subreg.sub3
+ ; GFX1250-NEXT: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; GFX1250-NEXT: [[COPY7:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; GFX1250-NEXT: [[COPY8:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; GFX1250-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY7]], %subreg.sub1
+ ; GFX1250-NEXT: BUFFER_STORE_DWORDX2_VBUFFER_BOTHEN_exact [[REG_SEQUENCE]], [[REG_SEQUENCE2]], [[REG_SEQUENCE1]], [[COPY8]], 0, 0, 0, implicit $exec :: (dereferenceable store (<4 x s16>), align 1, addrspace 8)
+ ; GFX1250-NEXT: S_ENDPGM 0
call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %val, <4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
new file mode 100644
index 0000000..5fc9f4a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/atomics-system-scope.ll
@@ -0,0 +1,1486 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck --check-prefix=GFX1250 %s
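+; Both the default (system) scope and syncscope("one-as") atomicrmw operations
+; are expected to select to global atomic instructions with scope:SCOPE_SYS.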
+
+define float @global_system_atomic_fadd_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_system_atomic_fadd_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @global_one_as_atomic_fadd_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_one_as_atomic_fadd_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
+define double @global_system_atomic_fadd_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_system_atomic_fadd_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @global_one_as_atomic_fadd_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_one_as_atomic_fadd_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_add_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define float @global_system_atomic_fmin_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_system_atomic_fmin_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @global_one_as_atomic_fmin_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_one_as_atomic_fmin_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
+define double @global_system_atomic_fmin_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_system_atomic_fmin_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @global_one_as_atomic_fmin_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_one_as_atomic_fmin_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define float @global_system_atomic_fmax_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_system_atomic_fmax_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @global_one_as_atomic_fmax_f32(ptr addrspace(1) %ptr, float %val) {
+; GFX1250-LABEL: global_one_as_atomic_fmax_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_num_f32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
+define double @global_system_atomic_fmax_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_system_atomic_fmax_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @global_one_as_atomic_fmax_f64(ptr addrspace(1) %ptr, double %val) {
+; GFX1250-LABEL: global_one_as_atomic_fmax_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_num_f64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define i32 @global_one_as_atomic_min_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_one_as_atomic_min_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @global_system_atomic_min_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_system_atomic_min_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @global_one_as_atomic_max_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_one_as_atomic_max_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @global_system_atomic_max_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_system_atomic_max_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_i32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @global_one_as_atomic_umin_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umin_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @global_system_atomic_umin_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_system_atomic_umin_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @global_one_as_atomic_umax_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umax_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @global_system_atomic_umax_i32(ptr addrspace(1) %ptr, i32 %val) {
+; GFX1250-LABEL: global_system_atomic_umax_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_u32 v0, v[0:1], v2, off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i64 @global_one_as_atomic_min_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_one_as_atomic_min_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @global_system_atomic_min_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_system_atomic_min_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @global_one_as_atomic_max_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_one_as_atomic_max_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @global_system_atomic_max_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_system_atomic_max_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_i64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @global_one_as_atomic_umin_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umin_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @global_system_atomic_umin_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_system_atomic_umin_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_min_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @global_one_as_atomic_umax_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umax_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @global_system_atomic_umax_i64(ptr addrspace(1) %ptr, i64 %val) {
+; GFX1250-LABEL: global_system_atomic_umax_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: global_atomic_max_u64 v[0:1], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
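+; The i16 min/max variants have no native global atomic instruction and are
+; expanded to a loop that masks the halfword into its aligned dword and
+; retries with global_atomic_cmpswap_b32 until the exchange succeeds.
+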
+define i16 @global_one_as_atomic_min_i16(ptr addrspace(1) %ptr, i16 %val) {
+; GFX1250-LABEL: global_one_as_atomic_min_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: global_load_b32 v5, v[0:1], off
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_min_i16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB28_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @global_one_as_atomic_umin_i16(ptr addrspace(1) %ptr, i16 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umin_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: global_load_b32 v5, v[0:1], off
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_min_u16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB29_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @global_one_as_atomic_max_i16(ptr addrspace(1) %ptr, i16 %val) {
+; GFX1250-LABEL: global_one_as_atomic_max_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: global_load_b32 v5, v[0:1], off
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_max_i16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB30_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @global_one_as_atomic_umax_i16(ptr addrspace(1) %ptr, i16 %val) {
+; GFX1250-LABEL: global_one_as_atomic_umax_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: global_load_b32 v5, v[0:1], off
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_max_u16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: global_atomic_cmpswap_b32 v5, v[0:1], v[6:7], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB31_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr addrspace(1) %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
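+; The flat_* tests mirror the global ones through the flat address space.
+; 32-bit operations still select single flat_atomic_* instructions; the
+; 64-bit and sub-dword cases are expanded as the checks below show.
+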
+define float @flat_system_atomic_fadd_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_system_atomic_fadd_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @flat_one_as_atomic_fadd_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fadd_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_add_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
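+; flat f64 fadd is expanded with runtime address-space checks: shared
+; pointers use ds_add_rtn_f64, private pointers go through a scratch
+; load/add/store sequence, and the remaining case uses
+; global_atomic_add_f64.
+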
+define double @flat_system_atomic_fadd_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_system_atomic_fadd_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB34_6
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execz .LBB34_3
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global
+; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB34_3: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_cbranch_execz .LBB34_5
+; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB34_5: ; %Flow1
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB34_6: ; %Flow2
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB34_8
+; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3]
+; GFX1250-NEXT: .LBB34_8: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @flat_one_as_atomic_fadd_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fadd_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b64 s[0:1], src_shared_base
+; GFX1250-NEXT: s_mov_b32 s0, exec_lo
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: v_cmpx_ne_u32_e64 s1, v1
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB35_6
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.check.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s1, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s1, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s1, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execz .LBB35_3
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.global
+; GFX1250-NEXT: global_atomic_add_f64 v[4:5], v[0:1], v[2:3], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB35_3: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_cbranch_execz .LBB35_5
+; GFX1250-NEXT: ; %bb.4: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s2, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s2, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB35_5: ; %Flow1
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB35_6: ; %Flow2
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB35_8
+; GFX1250-NEXT: ; %bb.7: ; %atomicrmw.shared
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc_lo
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: ds_add_rtn_f64 v[4:5], v0, v[2:3]
+; GFX1250-NEXT: .LBB35_8: ; %atomicrmw.phi
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fadd ptr %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define float @flat_system_atomic_fmin_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_system_atomic_fmin_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @flat_one_as_atomic_fmin_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fmin_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
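+; f64 flat fmin/fmax need only a private-pointer check; on the private
+; path the inputs are first quieted with v_max_num_f64 of a value with
+; itself before the min/max is applied.
+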
+define double @flat_system_atomic_fmin_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_system_atomic_fmin_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB38_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB38_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB38_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
+; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB38_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @flat_one_as_atomic_fmin_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fmin_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB39_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB39_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB39_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
+; GFX1250-NEXT: v_min_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB39_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmin ptr %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define float @flat_system_atomic_fmax_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_system_atomic_fmax_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, float %val monotonic
+ ret float %result
+}
+
+define float @flat_one_as_atomic_fmax_f32(ptr %ptr, float %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fmax_f32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_num_f32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, float %val syncscope("one-as") monotonic
+ ret float %result
+}
+
+define double @flat_system_atomic_fmax_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_system_atomic_fmax_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB42_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB42_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB42_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB42_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, double %val monotonic
+ ret double %result
+}
+
+define double @flat_one_as_atomic_fmax_f64(ptr %ptr, double %val) {
+; GFX1250-LABEL: flat_one_as_atomic_fmax_f64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB43_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_num_f64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB43_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB43_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_dual_max_num_f64 v[2:3], v[2:3], v[2:3] :: v_dual_cndmask_b32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[4:5], v[4:5]
+; GFX1250-NEXT: v_max_num_f64_e32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB43_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw fmax ptr %ptr, double %val syncscope("one-as") monotonic
+ ret double %result
+}
+
+define i32 @flat_one_as_atomic_min_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_min_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @flat_system_atomic_min_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_system_atomic_min_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @flat_one_as_atomic_max_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_max_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @flat_system_atomic_max_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_system_atomic_max_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_i32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @flat_one_as_atomic_umin_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umin_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @flat_system_atomic_umin_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_system_atomic_umin_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_min_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
+define i32 @flat_one_as_atomic_umax_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umax_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i32 %val syncscope("one-as") monotonic
+ ret i32 %result
+}
+
+define i32 @flat_system_atomic_umax_i32(ptr %ptr, i32 %val) {
+; GFX1250-LABEL: flat_system_atomic_umax_i32:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: flat_atomic_max_u32 v0, v[0:1], v2 th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i32 %val monotonic
+ ret i32 %result
+}
+
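+; The 64-bit flat min/max/umin/umax expansions below also need only a
+; private-pointer check: non-private pointers use the flat_atomic_*
+; instruction directly, while private pointers are rewritten to a scratch
+; load, a VALU min/max, and a scratch store.
+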
+define i64 @flat_one_as_atomic_min_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_min_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB52_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB52_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB52_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_i64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB52_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @flat_system_atomic_min_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_system_atomic_min_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB53_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB53_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB53_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_i64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB53_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @flat_one_as_atomic_max_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_max_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB54_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB54_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB54_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_i64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB54_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @flat_system_atomic_max_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_system_atomic_max_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB55_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_i64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB55_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB55_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_i64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB55_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @flat_one_as_atomic_umin_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umin_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB56_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB56_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB56_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_u64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB56_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @flat_system_atomic_umin_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_system_atomic_umin_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB57_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_min_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB57_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB57_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_min_u64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB57_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i64 %val monotonic
+ ret i64 %result
+}
+
+define i64 @flat_one_as_atomic_umax_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umax_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB58_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB58_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB58_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_u64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB58_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i64 %val syncscope("one-as") monotonic
+ ret i64 %result
+}
+
+define i64 @flat_system_atomic_umax_i64(ptr %ptr, i64 %val) {
+; GFX1250-LABEL: flat_system_atomic_umax_i64:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_mov_b32 s0, src_flat_scratch_base_hi
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_xor_b32_e32 v4, s0, v1
+; GFX1250-NEXT: v_cmp_lt_u32_e32 vcc_lo, 0x3ffffff, v4
+; GFX1250-NEXT: ; implicit-def: $vgpr4_vgpr5
+; GFX1250-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_xor_b32 s0, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB59_2
+; GFX1250-NEXT: ; %bb.1: ; %atomicrmw.global
+; GFX1250-NEXT: flat_atomic_max_u64 v[4:5], v[0:1], v[2:3] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3
+; GFX1250-NEXT: .LBB59_2: ; %Flow
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_cbranch_execz .LBB59_4
+; GFX1250-NEXT: ; %bb.3: ; %atomicrmw.private
+; GFX1250-NEXT: s_mov_b32 s1, src_flat_scratch_base_lo
+; GFX1250-NEXT: v_cmp_ne_u64_e32 vcc_lo, 0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_subrev_nc_u32_e32 v4, s1, v0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e32 v6, -1, v4, vcc_lo
+; GFX1250-NEXT: scratch_load_b64 v[4:5], v6, off
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_max_u64 v[0:1], v[4:5], v[2:3]
+; GFX1250-NEXT: scratch_store_b64 v6, v[0:1], off scope:SCOPE_SE
+; GFX1250-NEXT: .LBB59_4: ; %atomicrmw.phi
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v0, v4 :: v_dual_mov_b32 v1, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i64 %val monotonic
+ ret i64 %result
+}
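; A note on the i64 tests above: they all share one flat-pointer expansion
; shape. A flat address on gfx1250 may point at private (scratch) memory, so
; the backend compares the pointer's high dword against
; src_flat_scratch_base_hi and splits into a real flat atomic for the global
; case and a plain per-lane scratch load/op/store (plus a null-pointer
; cndmask to offset -1) for the private case. A minimal IR sketch of that
; control flow, using the llvm.amdgcn.is.private intrinsic in place of the
; inline aperture compare (names here are illustrative, not the backend's):

define i64 @flat_umax_expansion_sketch(ptr %p, i64 %v) {
entry:
  %is.priv = call i1 @llvm.amdgcn.is.private(ptr %p)
  br i1 %is.priv, label %private, label %global

global:                                  ; flat_atomic_max_u64 ... th:TH_ATOMIC_RETURN
  %g = atomicrmw umax ptr %p, i64 %v syncscope("one-as") monotonic
  br label %done

private:                                 ; scratch is per-lane, so no atomicity needed
  %sp = addrspacecast ptr %p to ptr addrspace(5)
  %old = load i64, ptr addrspace(5) %sp  ; scratch_load_b64
  %gt = icmp ugt i64 %old, %v
  %new = select i1 %gt, i64 %old, i64 %v ; v_max_u64
  store i64 %new, ptr addrspace(5) %sp   ; scratch_store_b64 ... scope:SCOPE_SE
  br label %done

done:
  %r = phi i64 [ %g, %global ], [ %old, %private ]
  ret i64 %r
}

declare i1 @llvm.amdgcn.is.private(ptr)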
+
+define i16 @flat_one_as_atomic_min_i16(ptr %ptr, i16 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_min_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: flat_load_b32 v5, v[0:1]
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB60_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_min_i16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB60_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw min ptr %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @flat_one_as_atomic_umin_i16(ptr %ptr, i16 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umin_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: flat_load_b32 v5, v[0:1]
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB61_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_min_u16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB61_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umin ptr %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @flat_one_as_atomic_max_i16(ptr %ptr, i16 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_max_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: flat_load_b32 v5, v[0:1]
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB62_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_max_i16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB62_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw max ptr %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
+
+define i16 @flat_one_as_atomic_umax_i16(ptr %ptr, i16 %val) {
+; GFX1250-LABEL: flat_one_as_atomic_umax_i16:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v3, v0
+; GFX1250-NEXT: s_mov_b32 s0, 0
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v0, -4, v3
+; GFX1250-NEXT: v_and_b32_e32 v3, 3, v3
+; GFX1250-NEXT: v_lshlrev_b32_e32 v3, 3, v3
+; GFX1250-NEXT: flat_load_b32 v5, v[0:1]
+; GFX1250-NEXT: v_lshlrev_b32_e64 v4, v3, 0xffff
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_not_b32_e32 v4, v4
+; GFX1250-NEXT: .LBB63_1: ; %atomicrmw.start
+; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_mov_b32_e32 v7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_lshrrev_b32_e32 v5, v3, v7
+; GFX1250-NEXT: v_max_u16 v5, v5, v2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; GFX1250-NEXT: v_lshlrev_b32_e32 v5, v3, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_and_or_b32 v6, v7, v4, v5
+; GFX1250-NEXT: flat_atomic_cmpswap_b32 v5, v[0:1], v[6:7] th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: v_cmp_eq_u32_e32 vcc_lo, v5, v7
+; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB63_1
+; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: v_lshrrev_b32_e32 v0, v3, v5
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %result = atomicrmw umax ptr %ptr, i16 %val syncscope("one-as") monotonic
+ ret i16 %result
+}
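; The i16 min/max tests above take a different route: there is no sub-word
; flat atomic, so atomic expansion aligns the address down to 4 bytes, turns
; the low two address bits into a bit offset, and loops on
; flat_atomic_cmpswap_b32, splicing the new halfword in under a mask each
; iteration. An IR sketch of that loop (illustrative, with the matching
; machine instructions noted in comments):

define i16 @i16_umax_cas_sketch(ptr %p, i16 %v) {
entry:
  %a = ptrtoint ptr %p to i64
  %al = and i64 %a, -4                    ; v_and_b32_e32 v0, -4, v3
  %base = inttoptr i64 %al to ptr
  %lo2 = and i64 %a, 3                    ; v_and_b32_e32 v3, 3, v3
  %sh64 = shl i64 %lo2, 3                 ; byte offset -> bit offset
  %sh = trunc i64 %sh64 to i32
  %field = shl i32 65535, %sh             ; v_lshlrev_b32_e64 v4, v3, 0xffff
  %mask = xor i32 %field, -1              ; v_not_b32_e32
  %init = load i32, ptr %base             ; flat_load_b32
  br label %loop

loop:
  %old = phi i32 [ %init, %entry ], [ %seen, %loop ]
  %w = lshr i32 %old, %sh
  %h = trunc i32 %w to i16
  %gt = icmp ugt i16 %h, %v               ; v_max_u16 for umax
  %m = select i1 %gt, i16 %h, i16 %v
  %mz = zext i16 %m to i32
  %ins = shl i32 %mz, %sh
  %keep = and i32 %old, %mask
  %new = or i32 %keep, %ins               ; v_and_or_b32
  %cx = cmpxchg ptr %base, i32 %old, i32 %new monotonic monotonic
  %seen = extractvalue { i32, i1 } %cx, 0
  %ok = extractvalue { i32, i1 } %cx, 1
  br i1 %ok, label %exit, label %loop

exit:
  %rw = lshr i32 %seen, %sh               ; v_lshrrev_b32_e32 v0, v3, v5
  %r = trunc i32 %rw to i16
  ret i16 %r
}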
diff --git a/llvm/test/CodeGen/AMDGPU/bf16-math.ll b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
index 9979e83..3a82f84 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16-math.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16-math.ll
@@ -368,11 +368,11 @@ define amdgpu_ps float @test_clamp_v2bf16_s(<2 x bfloat> inreg %src) {
define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) {
; GCN-LABEL: test_clamp_bf16_folding:
; GCN: ; %bb.0:
-; GCN-NEXT: v_exp_bf16_e32 v0, v0
-; GCN-NEXT: v_nop
-; GCN-NEXT: s_delay_alu instid0(TRANS32_DEP_1)
-; GCN-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
+; GCN-NEXT: v_exp_bf16_e64 v0, v0 clamp
; GCN-NEXT: ; return to shader part epilog
+
+
+
%exp = call bfloat @llvm.exp2.bf16(bfloat %src)
%max = call bfloat @llvm.maxnum.bf16(bfloat %exp, bfloat 0.0)
%clamp = call bfloat @llvm.minnum.bf16(bfloat %max, bfloat 1.0)
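; The rewritten checks fold the maxnum-with-0.0 / minnum-with-1.0 pair into
; the producing instruction's clamp output modifier. This appears sound even
; for NaN inputs: maxnum/minnum return the non-NaN operand (so NaN -> 0.0),
; which matches the DX10-style clamp's NaN-to-zero behavior (assuming
; DX10_CLAMP semantics). A minimal reproducer in the same shape as the test:

define amdgpu_ps bfloat @clamp_fold_sketch(bfloat %x) {
  %e = call bfloat @llvm.exp2.bf16(bfloat %x)
  %lo = call bfloat @llvm.maxnum.bf16(bfloat %e, bfloat 0.0)
  %hi = call bfloat @llvm.minnum.bf16(bfloat %lo, bfloat 1.0)
  ret bfloat %hi                          ; -> v_exp_bf16_e64 v0, v0 clamp
}

declare bfloat @llvm.exp2.bf16(bfloat)
declare bfloat @llvm.maxnum.bf16(bfloat, bfloat)
declare bfloat @llvm.minnum.bf16(bfloat, bfloat)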
@@ -382,10 +382,11 @@ define amdgpu_ps bfloat @test_clamp_bf16_folding(bfloat %src) {
define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloat> %src1) {
; GCN-LABEL: test_clamp_v2bf16_folding:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
+; GCN-NEXT: v_pk_mul_bf16 v0, v0, v1 clamp
; GCN-NEXT: ; return to shader part epilog
+
+
+
%mul = fmul <2 x bfloat> %src0, %src1
%max = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %mul, <2 x bfloat> <bfloat 0.0, bfloat 0.0>)
%clamp = call <2 x bfloat> @llvm.minnum.v2bf16(<2 x bfloat> %max, <2 x bfloat> <bfloat 1.0, bfloat 1.0>)
@@ -396,11 +397,12 @@ define amdgpu_ps float @test_clamp_v2bf16_folding(<2 x bfloat> %src0, <2 x bfloa
define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) {
; GCN-LABEL: v_test_mul_add_v2bf16_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, v2, v3
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, v2, v4
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, v3, v4
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
+
+
+
%mul = fmul contract <2 x bfloat> %a, %b
%add = fadd contract <2 x bfloat> %mul, %c
store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -410,11 +412,12 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vvv(ptr addrspace(1) %out, <2 x bfl
define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) {
; GCN-LABEL: v_test_mul_add_v2bf16_vss:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, v2, s1
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, s1
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
+
+
+
%mul = fmul contract <2 x bfloat> %a, %b
%add = fadd contract <2 x bfloat> %mul, %c
store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -424,11 +427,14 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vss(ptr addrspace(1) %out, <2 x bfl
define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfloat> inreg %a, <2 x bfloat> inreg %b, <2 x bfloat> inreg %c) {
; GCN-LABEL: v_test_mul_add_v2bf16_sss:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, s0, s1
+; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, v2, s2
+; GCN-NEXT: v_pk_fma_bf16 v2, s0, s1, v2
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
+
+
+
%mul = fmul contract <2 x bfloat> %a, %b
%add = fadd contract <2 x bfloat> %mul, %c
store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -438,11 +444,12 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_sss(ptr addrspace(1) %out, <2 x bfl
define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfloat> %a, <2 x bfloat> inreg %b) {
; GCN-LABEL: v_test_mul_add_v2bf16_vsc:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, v2, s0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, v2, 0.5 op_sel_hi:[1,0]
+; GCN-NEXT: v_pk_fma_bf16 v2, v2, s0, 0.5 op_sel_hi:[1,1,0]
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
+
+
+
%mul = fmul contract <2 x bfloat> %a, %b
%add = fadd contract <2 x bfloat> %mul, <bfloat 0.5, bfloat 0.5>
store <2 x bfloat> %add, ptr addrspace(1) %out
@@ -452,11 +459,14 @@ define amdgpu_ps void @v_test_mul_add_v2bf16_vsc(ptr addrspace(1) %out, <2 x bfl
define amdgpu_ps void @v_test_mul_add_v2bf16_vll(ptr addrspace(1) %out, <2 x bfloat> %a) {
; GCN-LABEL: v_test_mul_add_v2bf16_vll:
; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_mul_bf16 v2, 0x42c83f80, v2
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_pk_add_bf16 v2, 0x43484000, v2
+; GCN-NEXT: s_mov_b32 s0, 0x43484000
+; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GCN-NEXT: v_pk_fma_bf16 v2, 0x42c83f80, v2, s0
; GCN-NEXT: global_store_b32 v[0:1], v2, off
; GCN-NEXT: s_endpgm
+
+
+
%mul = fmul contract <2 x bfloat> %a, <bfloat 1.0, bfloat 100.0>
%add = fadd contract <2 x bfloat> %mul, <bfloat 2.0, bfloat 200.0>
store <2 x bfloat> %add, ptr addrspace(1) %out
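; The v_pk_fma_bf16 rewrites in this file only fire because both the fmul and
; the fadd carry the 'contract' fast-math flag; without it, fusing would drop
; one of the two rounding steps and change results. A minimal sketch:

define <2 x bfloat> @fma_contract_sketch(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) {
  %mul = fmul contract <2 x bfloat> %a, %b
  %add = fadd contract <2 x bfloat> %mul, %c  ; -> v_pk_fma_bf16
  ret <2 x bfloat> %add
}

; The hunks above also show the operand-legalization cost of the fused form:
; an all-SGPR case stages one operand through a VGPR (v_mov_b32), a splat 0.5
; rides as an inline constant with op_sel_hi:[1,1,0], and the two-literal case
; moves one literal into an SGPR first, since the encoding has room for only
; a single 32-bit literal.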
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 52e697c..8f8ea13 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -47043,18 +47043,10 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX8-LABEL: v_fmuladd_bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
-; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
-; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
@@ -47067,20 +47059,13 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX900-LABEL: v_fmuladd_bf16:
; GFX900: ; %bb.0:
; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX900-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX900-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX900-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX900-NEXT: s_movk_i32 s4, 0x7fff
; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX900-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc
-; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX900-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4
; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -47090,35 +47075,25 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX950-LABEL: v_fmuladd_bf16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX950-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
+; GFX950-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v2, s0
; GFX950-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmuladd_bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v3, vcc_lo
-; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX10-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX10-NEXT: v_add3_u32 v0, v0, v2, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX10-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -47126,55 +47101,38 @@ define bfloat @v_fmuladd_bf16(bfloat %a, bfloat %b, bfloat %c) {
; GFX11TRUE16: ; %bb.0:
; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, 0
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v1.l
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.h, v1.l
; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.h, v0.l
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v3.l
; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v3.l
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v1, v3
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.h, v2.l
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc_lo
-; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v3, v1, v2
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v3, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v3
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v0, v0, v3
-; GFX11TRUE16-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11FAKE16-LABEL: v_fmuladd_bf16:
; GFX11FAKE16: ; %bb.0:
; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v0
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v2, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v0, v1, v3 :: v_dual_lshlrev_b32 v1, 16, v2
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v2, 0x400000, v0
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v0, 0x7fff
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v2, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11FAKE16-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
%op = call bfloat @llvm.fmuladd.bf16(bfloat %a, bfloat %b, bfloat %c)
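; What the rewritten fmuladd checks boil down to: targets without native bf16
; arithmetic promote each operand to f32 by shifting it into the high half
; (v_lshlrev_b32 16), run a single v_fma_f32/v_fmac_f32 in place of the old
; mul+round+add sequence, and round back to bf16 once. The round is
; round-to-nearest-even with NaN quieting, implemented by the
; v_bfe_u32 / v_add3_u32 0x7fff / v_cndmask_b32 triple; GFX950 instead has
; v_cvt_pk_bf16_f32 in hardware. Equivalent IR for the rounding step
; (a sketch, not the backend's actual output):

define i16 @f32_to_bf16_rne_sketch(float %x) {
  %bits = bitcast float %x to i32
  %tie = lshr i32 %bits, 16
  %tie1 = and i32 %tie, 1                 ; v_bfe_u32 v1, v0, 16, 1
  %bias = add i32 %bits, 32767            ; + 0x7fff ...
  %rne = add i32 %bias, %tie1             ; ... + tie bit (v_add3_u32)
  %qnan = or i32 %bits, 4194304           ; v_or_b32 0x400000 quiets a NaN
  %isnan = fcmp uno float %x, %x          ; v_cmp_u_f32
  %sel = select i1 %isnan, i32 %qnan, i32 %rne
  %hi = lshr i32 %sel, 16
  %r = trunc i32 %hi to i16
  ret i16 %r
}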
@@ -47235,39 +47193,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX8-LABEL: v_fmuladd_v2bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v0
-; GFX8-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX8-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX8-NEXT: v_fma_f32 v3, v5, v4, v3
; GFX8-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v3
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_add_u32_e32 v4, vcc, s4, v4
-; GFX8-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
+; GFX8-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, s4, v1
-; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
-; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
-; GFX8-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX8-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v0
; GFX8-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
@@ -47279,36 +47220,22 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX900-LABEL: v_fmuladd_v2bf16:
; GFX900: ; %bb.0:
; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v0
-; GFX900-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX900-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX900-NEXT: s_movk_i32 s4, 0x7fff
-; GFX900-NEXT: v_add3_u32 v4, v4, v3, s4
-; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
-; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v2
-; GFX900-NEXT: v_add_f32_e32 v3, v3, v4
+; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2
+; GFX900-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0
+; GFX900-NEXT: v_fma_f32 v3, v5, v4, v3
+; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX900-NEXT: v_bfe_u32 v4, v3, 16, 1
-; GFX900-NEXT: v_mul_f32_e32 v0, v0, v1
+; GFX900-NEXT: s_movk_i32 s4, 0x7fff
+; GFX900-NEXT: v_fma_f32 v0, v0, v1, v2
; GFX900-NEXT: v_add3_u32 v4, v4, v3, s4
; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v3
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1
; GFX900-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc
; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4
-; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v4, vcc
-; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v2
-; GFX900-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX900-NEXT: v_bfe_u32 v1, v0, 16, 1
-; GFX900-NEXT: v_add3_u32 v1, v1, v0, s4
; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v0
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX900-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
@@ -47319,150 +47246,94 @@ define <2 x bfloat> @v_fmuladd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfl
; GFX950-LABEL: v_fmuladd_v2bf16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v1
-; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v0
+; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
+; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v1
+; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX950-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
-; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v2
-; GFX950-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX950-NEXT: v_add_f32_e32 v0, v0, v1
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3
+; GFX950-NEXT: v_fmac_f32_e32 v3, v5, v4
+; GFX950-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v2, v3
; GFX950-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmuladd_v2bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_mul_f32_e32 v3, v4, v3
-; GFX10-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX10-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX10-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX10-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX10-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
-; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX10-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX10-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX10-NEXT: v_perm_b32 v0, v0, v1, 0x7060302
+; GFX10-NEXT: v_fmac_f32_e32 v3, v5, v4
+; GFX10-NEXT: v_fmac_f32_e32 v2, v0, v1
+; GFX10-NEXT: v_bfe_u32 v0, v3, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX10-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
+; GFX10-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX10-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11TRUE16-LABEL: v_fmuladd_v2bf16:
; GFX11TRUE16: ; %bb.0:
; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v0
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
+; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11TRUE16-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v1, v0
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4
+; GFX11TRUE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v3, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1
; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11FAKE16-LABEL: v_fmuladd_v2bf16:
; GFX11FAKE16: ; %bb.0:
; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v1
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v0
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v0
; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_dual_mul_f32 v3, v4, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v0, v0, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v1, v3, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v3, 0x7fff
+; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX11FAKE16-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_add_f32 v1, v1, v3
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT: v_bfe_u32 v3, v0, 16, 1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX11FAKE16-NEXT: v_add3_u32 v3, v3, v0, 0x7fff
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v3, v5, vcc_lo
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v0, v1 :: v_dual_fmac_f32 v3, v5, v4
+; GFX11FAKE16-NEXT: v_bfe_u32 v1, v2, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v3, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v3
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
+; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v2, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v3, 0x7fff
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x7060302
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
%op = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
ret <2 x bfloat> %op
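; For <2 x bfloat> the same recipe runs twice -- low halves via shl 16, high
; halves via masking with 0xffff0000 -- with the two f32 FMAs dual-issued on
; GFX11 (v_dual_fmac_f32). The final v_perm_b32 with selector 0x7060302 packs
; the results by taking the high halfword of each rounded f32; roughly (lane
; assignment here is illustrative):

define i32 @pack_two_bf16_sketch(i32 %lane0, i32 %lane1) {
  %lo = lshr i32 %lane0, 16               ; lane0's bf16 into bits 15:0
  %hi = and i32 %lane1, -65536            ; keep lane1's bf16 in bits 31:16
  %r = or i32 %hi, %lo
  ret i32 %r
}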
@@ -47542,57 +47413,33 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX8-LABEL: v_fmuladd_v3bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0x7fff, v3
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX8-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX8-NEXT: s_movk_i32 s4, 0x7fff
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX8-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4
-; GFX8-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v0
+; GFX8-NEXT: v_fma_f32 v3, v6, v5, v3
; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
-; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
@@ -47605,52 +47452,31 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX900-LABEL: v_fmuladd_v3bf16:
; GFX900: ; %bb.0:
; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX900-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX900-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX900-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX900-NEXT: s_movk_i32 s4, 0x7fff
; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
-; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX900-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX900-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
-; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v4
-; GFX900-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v0
+; GFX900-NEXT: v_fma_f32 v3, v6, v5, v3
+; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX900-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4
; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v3
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v6, vcc
; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
-; GFX900-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4
; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -47662,211 +47488,132 @@ define <3 x bfloat> @v_fmuladd_v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfl
; GFX950-LABEL: v_fmuladd_v3bf16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX950-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v5, s0
+; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v4
+; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v0
+; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX950-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v4
-; GFX950-NEXT: v_add_f32_e32 v3, v3, v5
-; GFX950-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3
+; GFX950-NEXT: v_fmac_f32_e32 v3, v6, v5
+; GFX950-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v4, v3
; GFX950-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmuladd_v3bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_mul_f32_e32 v3, v7, v6
-; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX10-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v0
-; GFX10-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_add_f32_e32 v1, v1, v3
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT: v_add_f32_e32 v2, v2, v5
; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX10-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
-; GFX10-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX10-NEXT: v_alignbit_b32 v1, s4, v1, 16
+; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7
+; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX10-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_bfe_u32 v0, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX10-NEXT: v_add3_u32 v1, v1, v6, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX10-NEXT: v_add3_u32 v0, v0, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v0, v8, vcc_lo
+; GFX10-NEXT: v_perm_b32 v0, v2, v1, 0x7060302
+; GFX10-NEXT: v_alignbit_b32 v1, s4, v3, 16
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11TRUE16-LABEL: v_fmuladd_v3bf16:
; GFX11TRUE16: ; %bb.0:
; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11TRUE16-NEXT: v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v4
+; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
-; GFX11TRUE16-NEXT: v_bfe_u32 v7, v0, 16, 1
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v4, v0, v2 :: v_dual_fmac_f32 v5, v1, v3
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v6, v8, v7
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_bfe_u32 v1, v4, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v3, v5, 16, 1
; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v2, v6, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v6
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v6, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6
; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v0, 0x7fff
-; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v6, 0x7fff
-; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; GFX11TRUE16-NEXT: v_add3_u32 v8, v9, v1, 0x7fff
-; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v7, v10, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v2, v2, v3
-; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v1, v8, v6 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11TRUE16-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v1, v1, v5
-; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v2
-; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_add3_u32 v5, v6, v1, 0x7fff
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v2.h
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v7, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
+; GFX11TRUE16-NEXT: v_add3_u32 v1, v1, v4, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v6, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11TRUE16-NEXT: v_add3_u32 v2, v3, v5, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v5
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v2, v0
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc_lo
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
+; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v2.h
; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11FAKE16-LABEL: v_fmuladd_v3bf16:
; GFX11FAKE16: ; %bb.0:
; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v2
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v0
-; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_dual_mul_f32 v0, v0, v2 :: v_dual_lshlrev_b32 v3, 16, v3
-; GFX11FAKE16-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_add3_u32 v8, v8, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v3, v7, v6
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v5, 16, v4
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v4
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v0
; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v7, v9, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v10, vcc_lo
-; GFX11FAKE16-NEXT: v_add_f32_e32 v2, v2, v5
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_dual_add_f32 v0, v0, v4 :: v_dual_add_f32 v1, v1, v3
-; GFX11FAKE16-NEXT: v_bfe_u32 v3, v2, 16, 1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11FAKE16-NEXT: v_bfe_u32 v5, v0, 16, 1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11FAKE16-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11FAKE16-NEXT: v_add3_u32 v3, v3, v2, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT: v_add3_u32 v5, v5, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_add3_u32 v4, v4, v1, 0x7fff
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v3, v7, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v5, v8, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v6, vcc_lo
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_alignbit_b32 v1, s0, v1, 16
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v5, 16, v5
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_bfe_u32 v2, v4, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v4
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v4, 0x7fff
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX11FAKE16-NEXT: v_bfe_u32 v1, v6, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v5, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_add3_u32 v1, v1, v6, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v5, 0x7fff
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v7, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v0, v8, vcc_lo
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v1, 0x7060302
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_alignbit_b32 v1, s0, v3, 16
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
%op = call <3 x bfloat> @llvm.fmuladd.v3bf16(<3 x bfloat> %a, <3 x bfloat> %b, <3 x bfloat> %c)
ret <3 x bfloat> %op
@@ -47966,75 +47713,43 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX8-LABEL: v_fmuladd_v4bf16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v3
-; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v1
-; GFX8-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v6
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7
-; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
-; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v5
-; GFX8-NEXT: v_add_f32_e32 v6, v6, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v3
+; GFX8-NEXT: v_lshlrev_b32_e32 v8, 16, v1
+; GFX8-NEXT: v_fma_f32 v6, v8, v7, v6
; GFX8-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v6
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7
-; GFX8-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, 0x7fff, v7
+; GFX8-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
; GFX8-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
-; GFX8-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX8-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v1
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, s4, v3
; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX8-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
-; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v4
-; GFX8-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v0
+; GFX8-NEXT: v_fma_f32 v3, v7, v5, v3
; GFX8-NEXT: v_bfe_u32 v5, v3, 16, 1
; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v3
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
-; GFX8-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX8-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, s4, v2
-; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
-; GFX8-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX8-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v0
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 0x7fff, v2
; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
@@ -48048,68 +47763,40 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX900-LABEL: v_fmuladd_v4bf16:
; GFX900: ; %bb.0:
; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v3
-; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v1
-; GFX900-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX900-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX900-NEXT: s_movk_i32 s4, 0x7fff
-; GFX900-NEXT: v_add3_u32 v7, v7, v6, s4
-; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v6
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
-; GFX900-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
-; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v5
-; GFX900-NEXT: v_add_f32_e32 v6, v6, v7
+; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v3
+; GFX900-NEXT: v_lshlrev_b32_e32 v8, 16, v1
+; GFX900-NEXT: v_fma_f32 v6, v8, v7, v6
+; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX900-NEXT: v_bfe_u32 v7, v6, 16, 1
-; GFX900-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX900-NEXT: s_movk_i32 s4, 0x7fff
+; GFX900-NEXT: v_fma_f32 v1, v1, v3, v5
; GFX900-NEXT: v_add3_u32 v7, v7, v6, s4
; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v6
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
; GFX900-NEXT: v_cndmask_b32_e32 v6, v7, v8, vcc
; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
-; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v1
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v7, vcc
-; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v5
-; GFX900-NEXT: v_add_f32_e32 v1, v1, v3
-; GFX900-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX900-NEXT: v_add3_u32 v3, v3, v1, s4
; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
; GFX900-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc
-; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v2
-; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v0
-; GFX900-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4
-; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v3
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
-; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
-; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v4
-; GFX900-NEXT: v_add_f32_e32 v3, v3, v5
+; GFX900-NEXT: v_lshlrev_b32_e32 v3, 16, v4
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v2
+; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v0
+; GFX900-NEXT: v_fma_f32 v3, v7, v5, v3
+; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX900-NEXT: v_bfe_u32 v5, v3, 16, 1
-; GFX900-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX900-NEXT: v_fma_f32 v0, v0, v2, v4
; GFX900-NEXT: v_add3_u32 v5, v5, v3, s4
; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v3
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1
; GFX900-NEXT: v_cndmask_b32_e32 v3, v5, v7, vcc
; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4
-; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v0
-; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
-; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v5, vcc
-; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v4
-; GFX900-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX900-NEXT: v_bfe_u32 v2, v0, 16, 1
-; GFX900-NEXT: v_add3_u32 v2, v2, v0, s4
; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v0
; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
; GFX900-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
@@ -48121,264 +47808,162 @@ define <4 x bfloat> @v_fmuladd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfl
; GFX950-LABEL: v_fmuladd_v4bf16:
; GFX950: ; %bb.0:
; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v1
+; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5
+; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v3
+; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v1
+; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v5
-; GFX950-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v5
-; GFX950-NEXT: v_add_f32_e32 v1, v1, v3
+; GFX950-NEXT: v_fmac_f32_e32 v6, v8, v7
+; GFX950-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v0
+; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v0
+; GFX950-NEXT: v_fmac_f32_e32 v1, v7, v3
+; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v4
; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_mul_f32_e32 v3, v5, v3
-; GFX950-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v6, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v3, s0
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, s0
-; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
-; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v4
-; GFX950-NEXT: v_add_f32_e32 v6, v6, v7
-; GFX950-NEXT: v_add_f32_e32 v3, v3, v5
-; GFX950-NEXT: v_add_f32_e32 v0, v0, v2
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v0, v3
-; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v1, v6
+; GFX950-NEXT: v_fmac_f32_e32 v3, v0, v2
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v3, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v5, v6
; GFX950-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fmuladd_v4bf16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v1
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v9, 16, v0
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_mul_f32_e32 v6, v7, v6
-; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; GFX10-NEXT: v_fmac_f32_e32 v6, v8, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v2
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v5
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
; GFX10-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v6
-; GFX10-NEXT: v_mul_f32_e32 v7, v9, v7
-; GFX10-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX10-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX10-NEXT: v_add3_u32 v10, v10, v6, 0x7fff
+; GFX10-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX10-NEXT: v_fmac_f32_e32 v7, v9, v8
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v6
+; GFX10-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX10-NEXT: v_add3_u32 v0, v10, v6, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX10-NEXT: v_bfe_u32 v3, v7, 16, 1
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX10-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX10-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v11, v0, 16, 1
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v10, v3, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_or_b32_e32 v10, 0x400000, v7
-; GFX10-NEXT: v_add3_u32 v9, v9, v7, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v12, 0x400000, v0
-; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v6, vcc_lo
+; GFX10-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo
+; GFX10-NEXT: v_add3_u32 v0, v2, v5, 0x7fff
+; GFX10-NEXT: v_add3_u32 v2, v3, v7, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v7
; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX10-NEXT: v_add3_u32 v11, v11, v0, 0x7fff
-; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; GFX10-NEXT: v_add_f32_e32 v3, v3, v8
-; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v9, v10, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v4
-; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX10-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo
-; GFX10-NEXT: v_add_f32_e32 v1, v1, v5
-; GFX10-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX10-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX10-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX10-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX10-NEXT: v_add3_u32 v4, v7, v3, 0x7fff
-; GFX10-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX10-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo
-; GFX10-NEXT: v_add3_u32 v4, v6, v1, 0x7fff
-; GFX10-NEXT: v_add3_u32 v5, v7, v2, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX10-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
-; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX10-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX10-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX10-NEXT: v_add3_u32 v6, v8, v4, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v0, v9, vcc_lo
+; GFX10-NEXT: v_perm_b32 v0, v3, v2, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v4, v1, 0x7060302
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11TRUE16-LABEL: v_fmuladd_v4bf16:
; GFX11TRUE16: ; %bb.0:
; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v3
-; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v0
-; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_lshlrev_b32 v3, 16, v3
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v3
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v1
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11TRUE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v5
-; GFX11TRUE16-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v5
; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v1, v1, v3
-; GFX11TRUE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v2
-; GFX11TRUE16-NEXT: v_dual_mul_f32 v3, v9, v7 :: v_dual_lshlrev_b32 v2, 16, v2
-; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v6
-; GFX11TRUE16-NEXT: v_add3_u32 v9, v10, v6, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_mul_f32_e32 v0, v0, v2
-; GFX11TRUE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v1
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v9, v7, vcc_lo
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_bfe_u32 v9, v0, 16, 1
-; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v0
-; GFX11TRUE16-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11TRUE16-NEXT: v_add3_u32 v9, v9, v0, 0x7fff
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v2, v10, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v3, 0x7fff
-; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v0, v9, v11 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11TRUE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v0, 0xffff0000, v0
-; GFX11TRUE16-NEXT: v_add_f32_e32 v2, v6, v8
-; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v3
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11TRUE16-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v2
+; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
+; GFX11TRUE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11TRUE16-NEXT: v_bfe_u32 v3, v5, 16, 1
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v10, v8
+; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v5
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v1, v0, v2
+; GFX11TRUE16-NEXT: v_add3_u32 v3, v3, v5, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v9, v6, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v7, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v7
+; GFX11TRUE16-NEXT: v_or_b32_e32 v2, 0x400000, v6
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v7, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v4, v9, v6, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v9, v1, 16, 1
; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v1
-; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v7, v6 :: v_dual_lshlrev_b32 v6, 16, v4
-; GFX11TRUE16-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11TRUE16-NEXT: v_add3_u32 v5, v5, v1, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_dual_add_f32 v0, v0, v6 :: v_dual_and_b32 v3, 0xffff0000, v3
-; GFX11TRUE16-NEXT: v_add3_u32 v6, v7, v2, 0x7fff
-; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v2
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v3.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v0, v10, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11TRUE16-NEXT: v_add3_u32 v5, v9, v1, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc_lo
; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_add_f32_e32 v3, v3, v4
-; GFX11TRUE16-NEXT: v_bfe_u32 v4, v0, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v0
; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v5, v8, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11TRUE16-NEXT: v_add3_u32 v4, v4, v0, 0x7fff
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v1.h
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v4, v10, vcc_lo
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v0.h
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v6, v7, vcc_lo
-; GFX11TRUE16-NEXT: v_bfe_u32 v9, v3, 16, 1
-; GFX11TRUE16-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0xffff, v1, v2
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_add3_u32 v5, v9, v3, 0x7fff
-; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v5, v8, vcc_lo
-; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v3
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_bfi_b32 v0, 0xffff, v0, v1
+; GFX11TRUE16-NEXT: v_bfi_b32 v1, 0xffff, v3, v2
; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
;
; GFX11FAKE16-LABEL: v_fmuladd_v4bf16:
; GFX11FAKE16: ; %bb.0:
; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v9, 16, v0
; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v1
-; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v5
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v3
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v3
; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_dual_mul_f32 v6, v7, v6 :: v_dual_and_b32 v5, 0xffff0000, v5
-; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v2
-; GFX11FAKE16-NEXT: v_dual_mul_f32 v1, v1, v3 :: v_dual_and_b32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v5, v1, v3
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v6, v8, v7 :: v_dual_lshlrev_b32 v7, 16, v4
+; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
; GFX11FAKE16-NEXT: v_bfe_u32 v10, v6, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v6
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v6
; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
-; GFX11FAKE16-NEXT: v_mul_f32_e32 v7, v9, v7
-; GFX11FAKE16-NEXT: v_add3_u32 v10, v10, v6, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v9, v7, 16, 1
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v10, v3 :: v_dual_mul_f32 v0, v0, v2
-; GFX11FAKE16-NEXT: v_bfe_u32 v2, v1, 16, 1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v10, 0x400000, v7
-; GFX11FAKE16-NEXT: v_add3_u32 v9, v9, v7, 0x7fff
-; GFX11FAKE16-NEXT: v_bfe_u32 v11, v0, 16, 1
-; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v1, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v12, 0x400000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_add3_u32 v11, v11, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v1, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v4
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v8, 16, v2
+; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v0, v2
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v10, v6, 0x7fff
+; GFX11FAKE16-NEXT: v_bfe_u32 v2, v5, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v9, v8
+; GFX11FAKE16-NEXT: v_bfe_u32 v8, v4, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v2, v5, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v5
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_bfe_u32 v3, v7, 16, 1
; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v2, v9, v10 :: v_dual_and_b32 v1, 0xffff0000, v1
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_dual_add_f32 v1, v1, v5 :: v_dual_and_b32 v2, 0xffff0000, v2
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v11, v12, vcc_lo
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11FAKE16-NEXT: v_add_f32_e32 v2, v2, v6
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; GFX11FAKE16-NEXT: v_bfe_u32 v6, v1, 16, 1
-; GFX11FAKE16-NEXT: v_add_f32_e32 v0, v0, v4
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11FAKE16-NEXT: v_add_f32_e32 v3, v3, v8
-; GFX11FAKE16-NEXT: v_bfe_u32 v8, v0, 16, 1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_bfe_u32 v7, v3, 16, 1
-; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v3
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11FAKE16-NEXT: v_add3_u32 v4, v7, v3, 0x7fff
-; GFX11FAKE16-NEXT: v_bfe_u32 v7, v2, 16, 1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v4, v5, vcc_lo
-; GFX11FAKE16-NEXT: v_add3_u32 v4, v6, v1, 0x7fff
-; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v2, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v6, 0x400000, v2
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11FAKE16-NEXT: v_add3_u32 v7, v8, v0, 0x7fff
-; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v0
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v5, v6, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v7, v8, vcc_lo
-; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v2, 0x7060302
-; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
-; GFX11FAKE16-NEXT: v_perm_b32 v1, v1, v3, 0x7060302
+; GFX11FAKE16-NEXT: v_add3_u32 v6, v8, v4, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v8, 0x400000, v4
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v3, v7, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v7
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v6, v8, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v0, v9, vcc_lo
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v3, v2, 0x7060302
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x7060302
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
%op = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat> %c)
ret <4 x bfloat> %op
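The updated checks above reflect llvm.fmuladd on bfloat vectors now selecting fused v_fma_f32/v_fmac_f32 instead of the old v_mul_f32, round-to-bf16, v_add_f32 sequence. A minimal sketch of the same pattern at the IR level (the v2 variant and its name are illustrative, not part of this test file):

define <2 x bfloat> @v_fmuladd_v2bf16_sketch(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) {
  ; fmuladd permits fusing, so this can lower to a fused multiply-add and
  ; skip the intermediate bf16 rounding of the multiply result.
  %op = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
  ret <2 x bfloat> %op
}
declare <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat>, <2 x bfloat>, <2 x bfloat>)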
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
index 7d36c9f..004d3c0 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-atomics.ll
@@ -284,6 +284,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
@@ -329,6 +330,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn(ptr inreg %sbase, i32 %vof
; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
+; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
@@ -382,6 +384,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-SDAG-NEXT: v_subrev_nc_u32_e32 v0, s1, v4
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-SDAG-NEXT: v_cndmask_b32_e32 v4, -1, v0, vcc_lo
+; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: scratch_load_b64 v[0:1], v4, off
; GFX1250-SDAG-NEXT: scratch_store_b64 v4, v[2:3], off scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
@@ -430,6 +433,7 @@ define amdgpu_ps <2 x float> @flat_xchg_saddr_i64_rtn_neg128(ptr inreg %sbase, i
; GFX1250-GISEL-NEXT: v_subrev_nc_u32_e32 v0, s1, v6
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_cndmask_b32_e32 v2, -1, v0, vcc_lo
+; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: scratch_load_b64 v[0:1], v2, off
; GFX1250-GISEL-NEXT: scratch_store_b64 v2, v[4:5], off scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index b25d9b2..fc88839 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -3621,7 +3621,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX9-NEXT: s_mov_b32 s0, 0
; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: s_movk_i32 s0, 0x3004
+; GFX9-NEXT: s_movk_i32 s0, 0x3000
+; GFX9-NEXT: s_add_i32 s0, s0, 4
; GFX9-NEXT: v_mov_b32_e32 v0, 15
; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -3637,7 +3638,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s9
; GFX10-NEXT: v_mov_b32_e32 v0, 13
; GFX10-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-NEXT: s_movk_i32 s0, 0x3804
+; GFX10-NEXT: s_movk_i32 s0, 0x3800
+; GFX10-NEXT: s_add_i32 s0, s0, 4
; GFX10-NEXT: scratch_store_dword off, v0, off offset:4
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664
@@ -3682,7 +3684,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX9-PAL-NEXT: s_addc_u32 flat_scratch_hi, s13, 0
; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:4
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3004
+; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000
+; GFX9-PAL-NEXT: s_add_i32 s0, s0, 4
; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15
; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
@@ -3716,8 +3719,9 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX1010-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX1010-PAL-NEXT: v_mov_b32_e32 v0, 13
; GFX1010-PAL-NEXT: v_mov_b32_e32 v1, 15
+; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3800
; GFX1010-PAL-NEXT: s_mov_b32 s1, 0
-; GFX1010-PAL-NEXT: s_movk_i32 s0, 0x3804
+; GFX1010-PAL-NEXT: s_add_i32 s0, s0, 4
; GFX1010-PAL-NEXT: scratch_store_dword off, v0, s1 offset:4
; GFX1010-PAL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX1010-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664
@@ -3739,7 +3743,8 @@ define amdgpu_kernel void @store_load_large_imm_offset_kernel() {
; GFX1030-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
; GFX1030-PAL-NEXT: v_mov_b32_e32 v0, 13
; GFX1030-PAL-NEXT: v_mov_b32_e32 v1, 15
-; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3804
+; GFX1030-PAL-NEXT: s_movk_i32 s0, 0x3800
+; GFX1030-PAL-NEXT: s_add_i32 s0, s0, 4
; GFX1030-PAL-NEXT: scratch_store_dword off, v0, off offset:4
; GFX1030-PAL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX1030-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664
@@ -3785,10 +3790,12 @@ define void @store_load_large_imm_offset_foo() {
; GFX9-LABEL: store_load_large_imm_offset_foo:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s0, 0x3000
; GFX9-NEXT: v_mov_b32_e32 v0, 13
+; GFX9-NEXT: s_add_i32 s1, s32, s0
; GFX9-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: s_add_i32 s0, s32, 0x3004
+; GFX9-NEXT: s_add_i32 s0, s1, 4
; GFX9-NEXT: v_mov_b32_e32 v0, 15
; GFX9-NEXT: scratch_store_dword off, v0, s0 offset:3712
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -3800,8 +3807,10 @@ define void @store_load_large_imm_offset_foo() {
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v0, 13
+; GFX10-NEXT: s_movk_i32 s0, 0x3800
; GFX10-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-NEXT: s_add_i32 s0, s32, 0x3804
+; GFX10-NEXT: s_add_i32 s1, s32, s0
+; GFX10-NEXT: s_add_i32 s0, s1, 4
; GFX10-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: scratch_store_dword off, v1, s0 offset:1664
@@ -3843,10 +3852,12 @@ define void @store_load_large_imm_offset_foo() {
; GFX9-PAL-LABEL: store_load_large_imm_offset_foo:
; GFX9-PAL: ; %bb.0: ; %bb
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-PAL-NEXT: s_movk_i32 s0, 0x3000
; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 13
+; GFX9-PAL-NEXT: s_add_i32 s1, s32, s0
; GFX9-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-PAL-NEXT: s_add_i32 s0, s32, 0x3004
+; GFX9-PAL-NEXT: s_add_i32 s0, s1, 4
; GFX9-PAL-NEXT: v_mov_b32_e32 v0, 15
; GFX9-PAL-NEXT: scratch_store_dword off, v0, s0 offset:3712
; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
@@ -3872,8 +3883,10 @@ define void @store_load_large_imm_offset_foo() {
; GFX10-PAL: ; %bb.0: ; %bb
; GFX10-PAL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-PAL-NEXT: v_mov_b32_e32 v0, 13
+; GFX10-PAL-NEXT: s_movk_i32 s0, 0x3800
; GFX10-PAL-NEXT: v_mov_b32_e32 v1, 15
-; GFX10-PAL-NEXT: s_add_i32 s0, s32, 0x3804
+; GFX10-PAL-NEXT: s_add_i32 s1, s32, s0
+; GFX10-PAL-NEXT: s_add_i32 s0, s1, 4
; GFX10-PAL-NEXT: scratch_store_dword off, v0, s32 offset:4
; GFX10-PAL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-PAL-NEXT: scratch_store_dword off, v1, s0 offset:1664
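These flat-scratch hunks all track one SGPR-folding change: a combined constant such as 0x3004 is no longer folded into a single literal; the aligned part (0x3000 or 0x3800) is materialized with s_movk_i32 and the remaining 4 is applied with a separate s_add_i32. A reduced IR shape that exercises large scratch immediate offsets (a sketch under assumptions; the actual test bodies are not shown in this diff):

define amdgpu_kernel void @store_load_large_imm_offset_sketch() {
  %buf = alloca [4096 x i32], align 4, addrspace(5)
  ; Small offset: fits directly in the scratch instruction's offset field.
  %p.small = getelementptr inbounds [4096 x i32], ptr addrspace(5) %buf, i32 0, i32 1
  store volatile i32 13, ptr addrspace(5) %p.small
  ; Large offset: exceeds the immediate field, so part of the constant
  ; must be materialized into an SGPR base.
  %p.large = getelementptr inbounds [4096 x i32], ptr addrspace(5) %buf, i32 0, i32 4000
  store volatile i32 15, ptr addrspace(5) %p.large
  ret void
}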
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
index 1b092b2..5674ae3 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-modifier-casting.ll
@@ -349,29 +349,24 @@ define i32 @select_fneg_xor_select_i32(i1 %cond0, i1 %cond1, i32 %arg0, i32 %arg
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
-; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_and_b32_e32 v1, 1, v1
-; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
-; GCN-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, v3, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v0, v0, -v0, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: select_fneg_xor_select_i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, v3, vcc_lo
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
-; GFX11-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, -v0, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%fneg0 = xor i32 %arg0, -2147483648
%select0 = select i1 %cond0, i32 %arg1, i32 %fneg0
@@ -550,31 +545,25 @@ define i64 @select_fneg_xor_select_i64(i1 %cond0, i1 %cond1, i64 %arg0, i64 %arg
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_and_b32_e32 v0, 1, v0
-; GCN-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_and_b32_e32 v1, 1, v1
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc
-; GCN-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; GCN-NEXT: v_cndmask_b32_e64 v2, -v3, v5, vcc
; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 1, v1
-; GCN-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v2, -v2, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: select_fneg_xor_select_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX11-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX11-NEXT: v_and_b32_e32 v1, 1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_cndmask_b32 v0, v2, v4 :: v_dual_and_b32 v1, 1, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v2, -v3, v5, vcc_lo
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v1
-; GFX11-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v2, -v2, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%fneg0 = xor i64 %arg0, 9223372036854775808
%select0 = select i1 %cond0, i64 %arg1, i64 %fneg0
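The change in this file folds the explicit sign-bit xor (0x80000000) into a negation source modifier on v_cndmask_b32_e64, dropping the standalone v_xor_b32 instructions; for i64 only the high half carries the sign bit, so only one register of the pair is rewritten. A sketch of the full i32 test shape (the tail after %select0 is reconstructed from the assembly above, so treat it as an assumption):

define i32 @select_fneg_xor_select_i32_sketch(i1 %cond0, i1 %cond1, i32 %arg0, i32 %arg1) {
  %fneg0 = xor i32 %arg0, -2147483648    ; integer form of fneg: flip the sign bit
  %select0 = select i1 %cond0, i32 %arg1, i32 %fneg0
  %fneg1 = xor i32 %select0, -2147483648 ; folded into the -v0 source modifier above
  %select1 = select i1 %cond1, i32 %fneg1, i32 %select0
  ret i32 %select1
}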
diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
index 7fad2f4..a88b1ec 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-operands-frame-index.mir
@@ -75,7 +75,8 @@ stack:
body: |
bb.0:
; CHECK-LABEL: name: fold_frame_index__s_add_i32__fi_materializedconst_0
- ; CHECK: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, 256, implicit-def $scc
+ ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 256
+ ; CHECK-NEXT: [[S_ADD_I32_:%[0-9]+]]:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]], implicit-def $scc
; CHECK-NEXT: $sgpr4 = COPY [[S_ADD_I32_]]
; CHECK-NEXT: SI_RETURN implicit $sgpr4
%0:sreg_32 = S_MOV_B32 %stack.0
diff --git a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir
index cc43142..2f2d727 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-sgpr-multi-imm.mir
@@ -46,7 +46,8 @@ body: |
%2:sreg_32 = S_LSHL2_ADD_U32 %0, %1, implicit-def $scc
...
# GCN-LABEL: name: test_frameindex{{$}}
-# GCN: %1:sreg_32 = S_ADD_I32 %stack.0, 70
+# GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 70
+# GCN-NEXT: %1:sreg_32 = S_ADD_I32 %stack.0, [[S_MOV_B32_]]
---
name: test_frameindex
tracksRegLiveness: true
diff --git a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
index f9a24fe..0cb2b0b 100644
--- a/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp64-atomics-gfx90a.ll
@@ -2102,23 +2102,10 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret(ptr addrspace(3) %ptr, do
; GFX1250-NEXT: s_load_b32 s2, s[4:5], 0x24
; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x2c
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v2, s2
-; GFX1250-NEXT: s_mov_b32 s2, 0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: .LBB51_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: v_mov_b32_e32 v2, s2
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[4:5], s[0:1], v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s2, vcc_lo, s2
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
-; GFX1250-NEXT: s_cbranch_execnz .LBB51_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1250-NEXT: s_endpgm
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
@@ -2148,24 +2135,9 @@ define double @local_atomic_fadd_f64_rtn(ptr addrspace(3) %ptr, double %data) {
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
-; GFX1250-NEXT: v_mov_b32_e32 v4, v1
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB52_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB52_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
@@ -2197,24 +2169,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat(ptr addrspace(3) %ptr
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat:
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: .LBB53_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: v_mov_b32_e32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB53_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2246,24 +2205,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush(ptr addrspace(3
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush:
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: .LBB54_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB54_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2295,24 +2241,11 @@ define amdgpu_kernel void @local_atomic_fadd_f64_noret_pat_flush_safe(ptr addrsp
; GFX1250-LABEL: local_atomic_fadd_f64_noret_pat_flush_safe:
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_load_b32 s0, s[4:5], 0x24
+; GFX1250-NEXT: v_mov_b64_e32 v[0:1], 4.0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v2, s0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: .LBB55_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_add_f64_e32 v[4:5], 4.0, v[0:1]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[4:5], v2, v[4:5], v[0:1]
+; GFX1250-NEXT: v_mov_b32_e32 v2, s0
+; GFX1250-NEXT: ds_add_f64 v2, v[0:1]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB55_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1250-NEXT: s_endpgm
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst
@@ -2341,23 +2274,9 @@ define double @local_atomic_fadd_f64_rtn_pat(ptr addrspace(3) %ptr, double %data
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_mov_b32_e32 v2, v0
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB56_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[4:5], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], 4.0, v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[4:5]
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], 4.0
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[4:5]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB56_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = atomicrmw fadd ptr addrspace(3) %ptr, double 4.0 seq_cst, !amdgpu.no.fine.grained.memory !0
@@ -2387,24 +2306,9 @@ define double @local_atomic_fadd_f64_rtn_ieee_unsafe(ptr addrspace(3) %ptr, doub
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
-; GFX1250-NEXT: v_mov_b32_e32 v4, v1
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB57_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB57_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
@@ -2434,24 +2338,9 @@ define double @local_atomic_fadd_f64_rtn_ieee_safe(ptr addrspace(3) %ptr, double
; GFX1250: ; %bb.0: ; %main_body
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_dual_mov_b32 v5, v2 :: v_dual_mov_b32 v2, v0
-; GFX1250-NEXT: v_mov_b32_e32 v4, v1
-; GFX1250-NEXT: ds_load_b64 v[0:1], v0
-; GFX1250-NEXT: s_mov_b32 s0, 0
-; GFX1250-NEXT: .LBB58_1: ; %atomicrmw.start
-; GFX1250-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_mov_b64_e32 v[6:7], v[0:1]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: v_add_f64_e32 v[0:1], v[6:7], v[4:5]
-; GFX1250-NEXT: ds_cmpstore_rtn_b64 v[0:1], v2, v[0:1], v[6:7]
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: ds_add_rtn_f64 v[0:1], v0, v[2:3]
; GFX1250-NEXT: s_wait_dscnt 0x0
-; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[6:7]
-; GFX1250-NEXT: s_or_b32 s0, vcc_lo, s0
-; GFX1250-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GFX1250-NEXT: s_cbranch_execnz .LBB58_1
-; GFX1250-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1250-NEXT: s_or_b32 exec_lo, exec_lo, s0
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
main_body:
%ret = call double @llvm.amdgcn.ds.fadd.f64(ptr addrspace(3) %ptr, double %data, i32 0, i32 0, i1 0)
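Every GFX1250 hunk in this file makes the same substitution: the expanded compare-and-swap loop built around ds_cmpstore_rtn_b64 is replaced by the direct ds_add_f64 / ds_add_rtn_f64 instructions. A minimal atomicrmw form that now maps straight onto the hardware instruction, mirroring the tests above (the function name is illustrative):

define double @local_atomic_fadd_f64_rtn_sketch(ptr addrspace(3) %ptr, double %data) {
  ; On gfx1250 this LDS fadd selects to the native ds_add_rtn_f64
  ; rather than a cmpxchg retry loop.
  %ret = atomicrmw fadd ptr addrspace(3) %ptr, double %data seq_cst, !amdgpu.no.fine.grained.memory !0
  ret double %ret
}
!0 = !{}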
diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
index 15cda62..f2fe61f 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -360,7 +360,8 @@ entry:
; s_add_i32.
; GCN-LABEL: {{^}}fi_sop2_s_add_u32_literal_error:
-; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0, 0x2010
+; GCN: s_movk_i32 [[S_MOVK_I32_:s[0-9]+]], 0x1000
+; GCN: s_add_u32 [[ADD_LO:s[0-9]+]], 0x1010, [[S_MOVK_I32_]]
; GCN: s_addc_u32 [[ADD_HI:s[0-9]+]], s{{[0-9]+}}, 0
define amdgpu_kernel void @fi_sop2_s_add_u32_literal_error() #0 {
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
index 3a898a9..f0db321 100644
--- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
@@ -244,8 +244,9 @@ define i32 @test_v64i32_load_store(ptr addrspace(1) %ptr, i32 %idx, ptr addrspac
; GCN-GISEL-NEXT: global_load_b128 v[60:63], v[0:1], off offset:16
; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[0:1], off offset:240
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
-; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE ; 16-byte Folded Spill
-; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU ; 16-byte Folded Reload
+; GCN-GISEL-NEXT: s_clause 0x1
+; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
; GCN-GISEL-NEXT: s_clause 0xe
; GCN-GISEL-NEXT: global_store_b128 v[46:47], v[0:3], off offset:32
diff --git a/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir b/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir
index 8007597..492753b 100644
--- a/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir
+++ b/llvm/test/CodeGen/AMDGPU/hard-clauses-gfx1250.mir
@@ -1,6 +1,507 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12,GFX1200
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -run-pass si-insert-hard-clauses %s -o - | FileCheck %s -check-prefixes=GFX12,GFX1250
+
+---
+name: long_clause
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; GFX1200-LABEL: name: long_clause
+ ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit-def $vgpr5, implicit-def $vgpr5_lo16, implicit-def $vgpr5_hi16, implicit-def $vgpr6, implicit-def $vgpr6_lo16, implicit-def $vgpr6_hi16, implicit-def $vgpr7, implicit-def $vgpr7_lo16, implicit-def $vgpr7_hi16, implicit-def $vgpr8, implicit-def $vgpr8_lo16, implicit-def $vgpr8_hi16, implicit-def $vgpr9, implicit-def $vgpr9_lo16, implicit-def $vgpr9_hi16, implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr13, implicit-def $vgpr13_lo16, implicit-def $vgpr13_hi16, implicit-def $vgpr14, implicit-def $vgpr14_lo16, implicit-def $vgpr14_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit-def $vgpr17, implicit-def $vgpr17_lo16, implicit-def $vgpr17_hi16, implicit-def $vgpr18, implicit-def $vgpr18_lo16, implicit-def $vgpr18_hi16, implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit-def $vgpr21, implicit-def $vgpr21_lo16, implicit-def $vgpr21_hi16, implicit-def $vgpr22, implicit-def $vgpr22_lo16, implicit-def $vgpr22_hi16, implicit-def $vgpr23, implicit-def $vgpr23_lo16, implicit-def $vgpr23_hi16, implicit-def $vgpr24, implicit-def $vgpr24_lo16, implicit-def $vgpr24_hi16, implicit-def $vgpr25, implicit-def $vgpr25_lo16, implicit-def $vgpr25_hi16, implicit-def $vgpr26, implicit-def $vgpr26_lo16, implicit-def $vgpr26_hi16, implicit-def $vgpr27, implicit-def $vgpr27_lo16, implicit-def $vgpr27_hi16, implicit-def $vgpr28, implicit-def $vgpr28_lo16, implicit-def $vgpr28_hi16, implicit-def $vgpr29, implicit-def $vgpr29_lo16, implicit-def $vgpr29_hi16, implicit-def $vgpr30, implicit-def $vgpr30_lo16, implicit-def $vgpr30_hi16, implicit-def $vgpr31, implicit-def $vgpr31_lo16, implicit-def $vgpr31_hi16, implicit-def $vgpr32, implicit-def $vgpr32_lo16, implicit-def $vgpr32_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1200-NEXT: S_CLAUSE 31
+ ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec
+ ; GFX1200-NEXT: }
+ ; GFX1200-NEXT: BUNDLE implicit-def $vgpr33, implicit-def $vgpr33_lo16, implicit-def $vgpr33_hi16, implicit-def $vgpr34, implicit-def $vgpr34_lo16, implicit-def $vgpr34_hi16, implicit-def $vgpr35, implicit-def $vgpr35_lo16, implicit-def $vgpr35_hi16, implicit-def $vgpr36, implicit-def $vgpr36_lo16, implicit-def $vgpr36_hi16, implicit-def $vgpr37, implicit-def $vgpr37_lo16, implicit-def $vgpr37_hi16, implicit-def $vgpr38, implicit-def $vgpr38_lo16, implicit-def $vgpr38_hi16, implicit-def $vgpr39, implicit-def $vgpr39_lo16, implicit-def $vgpr39_hi16, implicit-def $vgpr40, implicit-def $vgpr40_lo16, implicit-def $vgpr40_hi16, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1200-NEXT: S_CLAUSE 31
+ ; GFX1200-NEXT: $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec
+ ; GFX1200-NEXT: }
+ ; GFX1200-NEXT: BUNDLE implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1200-NEXT: S_CLAUSE 15
+ ; GFX1200-NEXT: $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec
+ ; GFX1200-NEXT: }
+ ;
+ ; GFX1250-LABEL: name: long_clause
+ ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+  ; GFX1250-NEXT:   BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit-def $vgpr5, implicit-def $vgpr5_lo16, implicit-def $vgpr5_hi16, implicit-def $vgpr6, implicit-def $vgpr6_lo16, implicit-def $vgpr6_hi16, implicit-def $vgpr7, implicit-def $vgpr7_lo16, implicit-def $vgpr7_hi16, implicit-def $vgpr8, implicit-def $vgpr8_lo16, implicit-def $vgpr8_hi16, implicit-def $vgpr9, implicit-def $vgpr9_lo16, implicit-def $vgpr9_hi16, implicit-def $vgpr10, implicit-def $vgpr10_lo16, implicit-def $vgpr10_hi16, implicit-def $vgpr11, implicit-def $vgpr11_lo16, implicit-def $vgpr11_hi16, implicit-def $vgpr12, implicit-def $vgpr12_lo16, implicit-def $vgpr12_hi16, implicit-def $vgpr13, implicit-def $vgpr13_lo16, implicit-def $vgpr13_hi16, implicit-def $vgpr14, implicit-def $vgpr14_lo16, implicit-def $vgpr14_hi16, implicit-def $vgpr15, implicit-def $vgpr15_lo16, implicit-def $vgpr15_hi16, implicit-def $vgpr16, implicit-def $vgpr16_lo16, implicit-def $vgpr16_hi16, implicit-def $vgpr17, implicit-def $vgpr17_lo16, implicit-def $vgpr17_hi16, implicit-def $vgpr18, implicit-def $vgpr18_lo16, implicit-def $vgpr18_hi16, implicit-def $vgpr19, implicit-def $vgpr19_lo16, implicit-def $vgpr19_hi16, implicit-def $vgpr20, implicit-def $vgpr20_lo16, implicit-def $vgpr20_hi16, implicit-def $vgpr21, implicit-def $vgpr21_lo16, implicit-def $vgpr21_hi16, implicit-def $vgpr22, implicit-def $vgpr22_lo16, implicit-def $vgpr22_hi16, implicit-def $vgpr23, implicit-def $vgpr23_lo16, implicit-def $vgpr23_hi16, implicit-def $vgpr24, implicit-def $vgpr24_lo16, implicit-def $vgpr24_hi16, implicit-def $vgpr25, implicit-def $vgpr25_lo16, implicit-def $vgpr25_hi16, implicit-def $vgpr26, implicit-def $vgpr26_lo16, implicit-def $vgpr26_hi16, implicit-def $vgpr27, implicit-def $vgpr27_lo16, implicit-def $vgpr27_hi16, implicit-def $vgpr28, implicit-def $vgpr28_lo16, implicit-def $vgpr28_hi16, implicit-def $vgpr29, implicit-def $vgpr29_lo16, implicit-def $vgpr29_hi16, implicit-def $vgpr30, implicit-def $vgpr30_lo16, implicit-def $vgpr30_hi16, implicit-def $vgpr31, implicit-def $vgpr31_lo16, implicit-def $vgpr31_hi16, implicit-def $vgpr32, implicit-def $vgpr32_lo16, implicit-def $vgpr32_hi16, implicit-def $vgpr33, implicit-def $vgpr33_lo16, implicit-def $vgpr33_hi16, implicit-def $vgpr34, implicit-def $vgpr34_lo16, implicit-def $vgpr34_hi16, implicit-def $vgpr35, implicit-def $vgpr35_lo16, implicit-def $vgpr35_hi16, implicit-def $vgpr36, implicit-def $vgpr36_lo16, implicit-def $vgpr36_hi16, implicit-def $vgpr37, implicit-def $vgpr37_lo16, implicit-def $vgpr37_hi16, implicit-def $vgpr38, implicit-def $vgpr38_lo16, implicit-def $vgpr38_hi16, implicit-def $vgpr39, implicit-def $vgpr39_lo16, implicit-def $vgpr39_hi16, implicit-def $vgpr40, implicit-def $vgpr40_lo16, implicit-def $vgpr40_hi16, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1250-NEXT: S_CLAUSE 62
+ ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $exec {
+ ; GFX1250-NEXT: S_CLAUSE 16
+ ; GFX1250-NEXT: $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec
+ $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 8, 0, 0, implicit $exec
+ $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 12, 0, 0, implicit $exec
+ $vgpr4 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 16, 0, 0, implicit $exec
+ $vgpr5 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 20, 0, 0, implicit $exec
+ $vgpr6 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 24, 0, 0, implicit $exec
+ $vgpr7 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 28, 0, 0, implicit $exec
+ $vgpr8 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 32, 0, 0, implicit $exec
+ $vgpr9 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 36, 0, 0, implicit $exec
+ $vgpr10 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 40, 0, 0, implicit $exec
+ $vgpr11 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 44, 0, 0, implicit $exec
+ $vgpr12 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 48, 0, 0, implicit $exec
+ $vgpr13 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 52, 0, 0, implicit $exec
+ $vgpr14 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 56, 0, 0, implicit $exec
+ $vgpr15 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 60, 0, 0, implicit $exec
+ $vgpr16 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 64, 0, 0, implicit $exec
+ $vgpr17 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 68, 0, 0, implicit $exec
+ $vgpr18 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 72, 0, 0, implicit $exec
+ $vgpr19 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 76, 0, 0, implicit $exec
+ $vgpr20 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 80, 0, 0, implicit $exec
+ $vgpr21 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 84, 0, 0, implicit $exec
+ $vgpr22 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 88, 0, 0, implicit $exec
+ $vgpr23 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 92, 0, 0, implicit $exec
+ $vgpr24 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 96, 0, 0, implicit $exec
+ $vgpr25 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 100, 0, 0, implicit $exec
+ $vgpr26 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 104, 0, 0, implicit $exec
+ $vgpr27 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 108, 0, 0, implicit $exec
+ $vgpr28 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 112, 0, 0, implicit $exec
+ $vgpr29 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 116, 0, 0, implicit $exec
+ $vgpr30 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 120, 0, 0, implicit $exec
+ $vgpr31 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 124, 0, 0, implicit $exec
+ $vgpr32 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 128, 0, 0, implicit $exec
+ $vgpr33 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 132, 0, 0, implicit $exec
+ $vgpr34 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 136, 0, 0, implicit $exec
+ $vgpr35 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 140, 0, 0, implicit $exec
+ $vgpr36 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 144, 0, 0, implicit $exec
+ $vgpr37 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 148, 0, 0, implicit $exec
+ $vgpr38 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 152, 0, 0, implicit $exec
+ $vgpr39 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 156, 0, 0, implicit $exec
+ $vgpr40 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 160, 0, 0, implicit $exec
+ $vgpr41 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 164, 0, 0, implicit $exec
+ $vgpr42 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 168, 0, 0, implicit $exec
+ $vgpr43 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 172, 0, 0, implicit $exec
+ $vgpr44 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 176, 0, 0, implicit $exec
+ $vgpr45 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 180, 0, 0, implicit $exec
+ $vgpr46 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 184, 0, 0, implicit $exec
+ $vgpr47 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 188, 0, 0, implicit $exec
+ $vgpr48 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 192, 0, 0, implicit $exec
+ $vgpr49 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 196, 0, 0, implicit $exec
+ $vgpr50 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 200, 0, 0, implicit $exec
+ $vgpr51 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 204, 0, 0, implicit $exec
+ $vgpr52 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 208, 0, 0, implicit $exec
+ $vgpr53 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 212, 0, 0, implicit $exec
+ $vgpr54 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 216, 0, 0, implicit $exec
+ $vgpr55 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 220, 0, 0, implicit $exec
+ $vgpr56 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 224, 0, 0, implicit $exec
+ $vgpr57 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 228, 0, 0, implicit $exec
+ $vgpr58 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 232, 0, 0, implicit $exec
+ $vgpr59 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 236, 0, 0, implicit $exec
+ $vgpr60 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 240, 0, 0, implicit $exec
+ $vgpr61 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 244, 0, 0, implicit $exec
+ $vgpr62 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 248, 0, 0, implicit $exec
+ $vgpr63 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 252, 0, 0, implicit $exec
+ $vgpr64 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 256, 0, 0, implicit $exec
+ $vgpr65 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 260, 0, 0, implicit $exec
+ $vgpr66 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 264, 0, 0, implicit $exec
+ $vgpr67 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 268, 0, 0, implicit $exec
+ $vgpr68 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 272, 0, 0, implicit $exec
+ $vgpr69 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 276, 0, 0, implicit $exec
+ $vgpr70 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 280, 0, 0, implicit $exec
+ $vgpr71 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 284, 0, 0, implicit $exec
+ $vgpr72 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 288, 0, 0, implicit $exec
+ $vgpr73 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 292, 0, 0, implicit $exec
+ $vgpr74 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 296, 0, 0, implicit $exec
+ $vgpr75 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 300, 0, 0, implicit $exec
+ $vgpr76 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 304, 0, 0, implicit $exec
+ $vgpr77 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 308, 0, 0, implicit $exec
+ $vgpr78 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 312, 0, 0, implicit $exec
+ $vgpr79 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 316, 0, 0, implicit $exec
+ $vgpr80 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 320, 0, 0, implicit $exec
+...
+
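+# A KILL between two loads is absorbed into the bundle and does not count
+# towards the clause length.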
+---
+name: kill
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr4
+ ; GFX12-LABEL: name: kill
+ ; GFX12: liveins: $sgpr0_sgpr1, $sgpr4
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit $sgpr0_sgpr1, implicit undef $sgpr4 {
+ ; GFX12-NEXT: S_CLAUSE 1
+ ; GFX12-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ ; GFX12-NEXT: KILL undef renamable $sgpr4
+ ; GFX12-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
+ ; GFX12-NEXT: }
+ $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ KILL undef renamable $sgpr4
+ $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
+...
+
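+# As above, but a KILL after the last load stays outside the bundle.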
+---
+name: kill2
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr4, $sgpr5
+ ; GFX12-LABEL: name: kill2
+ ; GFX12: liveins: $sgpr0_sgpr1, $sgpr4, $sgpr5
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: BUNDLE implicit-def $sgpr2, implicit-def $sgpr2_lo16, implicit-def $sgpr2_hi16, implicit-def $sgpr3, implicit-def $sgpr3_lo16, implicit-def $sgpr3_hi16, implicit $sgpr0_sgpr1, implicit undef $sgpr4 {
+ ; GFX12-NEXT: S_CLAUSE 1
+ ; GFX12-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ ; GFX12-NEXT: KILL undef renamable $sgpr4
+ ; GFX12-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
+ ; GFX12-NEXT: }
+ ; GFX12-NEXT: KILL undef renamable $sgpr5
+ $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+ KILL undef renamable $sgpr4
+ $sgpr3 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0
+ KILL undef renamable $sgpr5
+...
+
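+# gfx1250 may clause a flat load together with a returning flat atomic;
+# gfx1200 may not.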
+---
+name: flat_load_atomic
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-LABEL: name: flat_load_atomic
+ ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX1200-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+ ;
+ ; GFX1250-LABEL: name: flat_load_atomic
+ ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr, implicit $vgpr2 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX1250-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+ ; GFX1250-NEXT: }
+ $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+...
+
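+# gfx1250 may clause a global load together with a returning global atomic;
+# gfx1200 may not.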
+---
+name: global_load_atomic
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-LABEL: name: global_load_atomic
+ ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+ ;
+ ; GFX1250-LABEL: name: global_load_atomic
+ ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit-def $vgpr4, implicit-def $vgpr4_lo16, implicit-def $vgpr4_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr2 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ $vgpr4 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+...
+
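+# Flat and global loads are not claused together on either target.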
+---
+name: flat_global_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; GFX12-LABEL: name: flat_global_load
+ ; GFX12: liveins: $vgpr0_vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $flat_scr
+ $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $flat_scr
+...
+
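+# gfx1250 may clause a buffer load together with a returning buffer atomic;
+# gfx1200 may not.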
+---
+name: buffer_load_atomic
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1200-LABEL: name: buffer_load_atomic
+ ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ ; GFX1200-NEXT: $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec
+ ;
+ ; GFX1250-LABEL: name: buffer_load_atomic
+ ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit-def $vgpr0, implicit-def $vgpr0_lo16, implicit-def $vgpr0_hi16, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $exec, implicit $vgpr0 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ ; GFX1250-NEXT: $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ $vgpr0 = BUFFER_ATOMIC_ADD_OFFSET_RTN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 4, 0, 0, implicit $exec
+...
+
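+# gfx1250 may clause a flat load with a flat store; gfx1200 may not.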
+---
+name: flat_load_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-LABEL: name: flat_load_store
+ ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX1200-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+ ;
+ ; GFX1250-LABEL: name: flat_load_store
+ ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr, implicit $vgpr2 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX1250-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+ ; GFX1250-NEXT: }
+ $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec, implicit $flat_scr
+...
+
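+# gfx1250 may clause a global load with a global store; gfx1200 may not.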
+---
+name: global_load_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-LABEL: name: global_load_store
+ ; GFX1200: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX1200-NEXT: GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+ ;
+ ; GFX1250-LABEL: name: global_load_store
+ ; GFX1250: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $vgpr2 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX1250-NEXT: GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ GLOBAL_STORE_DWORD $vgpr0_vgpr1, $vgpr2, 4, 0, implicit $exec
+...
+
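+# gfx1250 may clause a buffer load with a buffer store; gfx1200 may not.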
+---
+name: buffer_load_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1200-LABEL: name: buffer_load_store
+ ; GFX1200: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1200-NEXT: {{ $}}
+ ; GFX1200-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ ; GFX1200-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+ ;
+ ; GFX1250-LABEL: name: buffer_load_store
+ ; GFX1250: liveins: $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, $vgpr0
+ ; GFX1250-NEXT: {{ $}}
+ ; GFX1250-NEXT: BUNDLE implicit-def $vgpr1, implicit-def $vgpr1_lo16, implicit-def $vgpr1_hi16, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $exec, implicit $vgpr0 {
+ ; GFX1250-NEXT: S_CLAUSE 1
+ ; GFX1250-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ ; GFX1250-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+ ; GFX1250-NEXT: }
+ $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec
+ BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+...
+
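+# A flat load followed by a global load does not form a clause.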
+---
+name: flat_load_global_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX12-LABEL: name: flat_load_global_load
+ ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+...
+
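+# A global load followed by a buffer store does not form a clause.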
+---
+name: global_load_buffer_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX12-LABEL: name: global_load_buffer_store
+ ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ ; GFX12-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+ $vgpr4 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec
+ BUFFER_STORE_DWORD_OFFSET $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec
+...
---
name: flat_prefetch_flat_load
@@ -31,3 +532,106 @@ body: |
GLOBAL_PREFETCH_B8 $vgpr0_vgpr1, 0, 0, implicit $exec
$vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
...
+
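+# Async LDS loads and stores may be claused together.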
+---
+name: async_load_async_store
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-LABEL: name: async_load_async_store
+ ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: BUNDLE implicit-def $asynccnt, implicit $vgpr2, implicit $vgpr0_vgpr1, implicit $exec, implicit $asynccnt {
+ ; GFX12-NEXT: S_CLAUSE 1
+ ; GFX12-NEXT: GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+ ; GFX12-NEXT: GLOBAL_STORE_ASYNC_FROM_LDS_B32 $vgpr0_vgpr1, $vgpr2, 32, 0, implicit-def $asynccnt, implicit $exec, implicit internal $asynccnt
+ ; GFX12-NEXT: }
+ GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+ GLOBAL_STORE_ASYNC_FROM_LDS_B32 $vgpr0_vgpr1, $vgpr2, 32, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+...
+
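+# An async LDS load is not claused with a DS transpose load.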
+---
+name: async_load_ds_load_tr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-LABEL: name: async_load_ds_load_tr
+ ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+ ; GFX12-NEXT: $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr2, 8, 0, implicit $exec
+ GLOBAL_LOAD_ASYNC_TO_LDS_B32 $vgpr2, $vgpr0_vgpr1, 0, 0, implicit-def $asynccnt, implicit $exec, implicit $asynccnt
+ $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr2, 8, 0, implicit $exec
+...
+
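+# DS transpose loads are not claused with each other or with a plain DS read.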
+---
+name: ds_load_trs_ds_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+ ; GFX12-LABEL: name: ds_load_trs_ds_load
+ ; GFX12: liveins: $vgpr0
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr4_vgpr5 = DS_LOAD_TR8_B64 $vgpr0, 0, 0, implicit $exec
+ ; GFX12-NEXT: $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr0, 8, 0, implicit $exec
+ ; GFX12-NEXT: $vgpr2_vgpr3 = DS_READ_B64_gfx9 $vgpr0, 16, 0, implicit $exec
+ $vgpr4_vgpr5 = DS_LOAD_TR8_B64 $vgpr0, 0, 0, implicit $exec
+ $vgpr0_vgpr1 = DS_LOAD_TR8_B64 $vgpr0, 8, 0, implicit $exec
+ $vgpr2_vgpr3 = DS_READ_B64_gfx9 $vgpr0, 16, 0, implicit $exec
+...
+
+# Make sure we do not clause DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 with anything
+---
+name: ds_atomic_async_barrier_arrive_b64_ds_read
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GFX12-LABEL: name: ds_atomic_async_barrier_arrive_b64_ds_read
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec
+ ; GFX12-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
+ ; GFX12-NEXT: $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec
+ $vgpr2 = DS_READ_B32_gfx9 $vgpr0, 0, 0, implicit $exec
+ DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
+ $vgpr3 = DS_READ_B32_gfx9 $vgpr0, 16, 0, implicit $exec
+...
+
+---
+name: ds_atomic_async_barrier_arrive_b64_flat_load
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+ ; GFX12-LABEL: name: ds_atomic_async_barrier_arrive_b64_flat_load
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
+ ; GFX12-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 16, 0, implicit $exec, implicit $flat_scr
+ $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ DS_ATOMIC_ASYNC_BARRIER_ARRIVE_B64 $vgpr1, 0, 0, implicit-def $asynccnt, implicit $asynccnt, implicit $exec
+ $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 16, 0, implicit $exec, implicit $flat_scr
+...
+
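+# Two global loads still form a clause when their cache-policy (scope)
+# operands differ.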
+---
+name: global_load_switching_scope
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1
+ ; GFX12-LABEL: name: global_load_switching_scope
+ ; GFX12: liveins: $vgpr0_vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: BUNDLE implicit-def $vgpr2, implicit-def $vgpr2_lo16, implicit-def $vgpr2_hi16, implicit-def $vgpr3, implicit-def $vgpr3_lo16, implicit-def $vgpr3_hi16, implicit $vgpr0_vgpr1, implicit $exec, implicit $flat_scr {
+ ; GFX12-NEXT: S_CLAUSE 1
+ ; GFX12-NEXT: $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 24, implicit $exec, implicit $flat_scr
+ ; GFX12-NEXT: }
+ $vgpr2 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr
+ $vgpr3 = GLOBAL_LOAD_DWORD $vgpr0_vgpr1, 4, 24, implicit $exec, implicit $flat_scr
+...
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir
index c7767cb8..b53bde6 100644
--- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir
+++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-agpr-negative-tests.mir
@@ -20,11 +20,32 @@
ret void
}
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2() #0 {
+ ret void
+ }
+
define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_src2_different_subreg() #0 {
ret void
}
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first() #1 {
+ ret void
+ }
+
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second() #1 {
+ ret void
+ }
+
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg() #1 {
+ ret void
+ }
+
+ define amdgpu_kernel void @inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg() #1 {
+ ret void
+ }
+
attributes #0 = { "amdgpu-wave-limiter"="true" "amdgpu-waves-per-eu"="8,8" }
+ attributes #1 = { "amdgpu-wave-limiter"="true" "amdgpu-waves-per-eu"="10,10" }
...
# Inflate pattern, except the defining instruction isn't an MFMA.
@@ -403,6 +424,89 @@ body: |
...
+# Non-mac variant, src2 is a physical register.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_physreg_src2
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
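+    ; src2 is an undef physical VGPR tuple rather than a virtual register.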
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
# Non-mac variant, src2 is the same VGPR, but a different subregister.
---
name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_src2_different_subreg
@@ -489,3 +593,423 @@ body: |
S_ENDPGM 0
...
+
+# There isn't an assignable AGPR around the first MFMA.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
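+    ; Four virtual areg_512 tuples are live across the first MFMA, occupying all AGPRs.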
+ S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2
+ %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# There isn't an assignable AGPR around the second MFMA.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
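+    ; Four virtual areg_512 tuples are live across the second MFMA, occupying all AGPRs.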
+ S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# There isn't an assignable AGPR around the first MFMA, with physreg interference.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first_physreg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
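+    ; All 64 AGPRs are live across the first MFMA as physical registers.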
+ S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# There isn't an assignable AGPR around the second MFMA, with physreg interference.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second_physreg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ ; CHECK-NEXT: S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
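+    ; All 64 AGPRs are live across the second MFMA as physical registers.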
+ S_NOP 0, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ S_NOP 0, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ S_NOP 0, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ S_NOP 0, implicit-def $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ S_NOP 0, implicit-def $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ S_NOP 0, implicit-def $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ S_NOP 0, implicit-def $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ S_NOP 0, implicit-def $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ S_NOP 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ S_NOP 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ S_NOP 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23
+ S_NOP 0, implicit $agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ S_NOP 0, implicit $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39
+ S_NOP 0, implicit $agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47
+ S_NOP 0, implicit $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55
+ S_NOP 0, implicit $agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
index b907c13..b59f2de 100644
--- a/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
+++ b/llvm/test/CodeGen/AMDGPU/inflate-reg-class-vgpr-mfma-to-av-with-load-source.mir
@@ -445,6 +445,86 @@ body: |
...
+
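+# Two chained tied MFMA uses of the same accumulator; the final use
+# (inline asm) cannot be rewritten.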
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_two_chained_uses_cannot_rewrite_final_use
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_two_chained_uses_cannot_rewrite_final_use
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, killed $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
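+    ; The inline asm constraint is VReg_512_Align2, forcing a copy back from AGPRs.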
+ INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %0:vreg_512_align2
+ S_ENDPGM 0
+
+...
+
# There is a rewrite candidate, but it is used by another MFMA which
# does not have a tied result.
---
@@ -619,10 +699,9 @@ body: |
S_ENDPGM 0
...
-
-# There isn't an assignable AGPR around the first MFMA.
+# Chain of two untied MFMAs, but the use isn't in src2.
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -630,7 +709,7 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_first
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
@@ -647,10 +726,8 @@ body: |
; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
- ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr4_vgpr5, $vgpr8_vgpr9, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
@@ -685,10 +762,8 @@ body: |
liveins: $vcc
undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2
%3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9
- %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %3.sub4_sub5, %3.sub8_sub9, undef %6:vreg_512_align2, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
@@ -711,9 +786,10 @@ body: |
...
-# There isn't an assignable AGPR around the second MFMA.
+# Chain of two untied MFMAs, but the second MFMA is a different size and
+# uses a subregister.
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -721,7 +797,7 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_no_agprs_second
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
@@ -739,18 +815,16 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: S_NOP 0, implicit-def renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit-def renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit-def renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit-def renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
- ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: S_NOP 0, implicit killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, implicit killed renamable $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, implicit killed renamable $agpr32_agpr33_agpr34_agpr35_agpr36_agpr37_agpr38_agpr39_agpr40_agpr41_agpr42_agpr43_agpr44_agpr45_agpr46_agpr47, implicit killed renamable $agpr48_agpr49_agpr50_agpr51_agpr52_agpr53_agpr54_agpr55_agpr56_agpr57_agpr58_agpr59_agpr60_agpr61_agpr62_agpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = V_MFMA_F32_16X16X16F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35:0x00000000FFFFFFFF
+ ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
- ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
@@ -758,10 +832,7 @@ body: |
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
bb.0:
S_NOP 0, implicit-def $agpr0
@@ -777,9 +848,7 @@ body: |
undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
%3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- S_NOP 0, implicit-def %6:areg_512_align2, implicit-def %7:areg_512_align2, implicit-def %8:areg_512_align2, implicit-def %9:areg_512_align2
- %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %3, 0, 0, 0, implicit $mode, implicit $exec
- S_NOP 0, implicit %6, implicit %7, implicit %8, implicit %9
+ %4:vreg_128_align2 = V_MFMA_F32_16X16X16F16_vgprcd_e64 %1, %1, %3.sub2_sub3_sub4_sub5, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
@@ -794,6 +863,229 @@ body: |
S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
%5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ S_ENDPGM 0
+
+...
+
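+# Performs a split and inflate within a single block.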
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_local_split
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_local_split
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X8F16_mac_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
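+    ; The tied mac MFMA is rewritten to the AGPR form; the load result is inflated with it.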
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Performs a split and inflate around the single instruction.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = V_MFMA_F32_32X32X8F16_mac_e64 killed $vgpr2_vgpr3, $vgpr2_vgpr3, $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Performs a split and inflate around the single instruction, non-tied case
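+# (the non-tied vgprcd MFMA keeps its VGPR result, which is then copied
+# into AGPRs).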
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $agpr0_agpr1
+ ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr6_vgpr7_vgpr8_vgpr9, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+    ; No VGPRs available for %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
@@ -802,9 +1094,11 @@ body: |
...
-# Chain of 2 untied cases, but the use isn't in src2.
+# This case does not fully use %0 after the MFMA. As a result,
+# SplitKit inserts a copy bundle for the subset of used lanes instead
+# of a simple copy.
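+# Here only sub0_sub3 and sub8_sub15 are live out (there is no store of
+# sub4_sub7), so only those lanes are copied into AGPRs.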
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split_partial_uses_only
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -812,7 +1106,447 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_non_src2
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_instruction_split_partial_uses_only
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $agpr0_agpr1
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 killed $vgpr2_vgpr3, $vgpr2_vgpr3, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY renamable $vgpr0_vgpr1_vgpr2_vgpr3
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = COPY renamable $agpr0_agpr1_agpr2_agpr3
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Untied version of the previous case. This case does not fully use %4
+# after the MFMA. As a result, SplitKit inserts a copy bundle for the
+# subset of used lanes instead of a simple copy.
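+# The same lane subset (sub0_sub3 and sub8_sub15) is copied out of the
+# untied vgprcd result.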
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split_partial_uses_only
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_instruction_split_partial_uses_only
+ ; CHECK: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: SI_SPILL_AV64_SAVE killed $vgpr0_vgpr1, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: renamable $agpr0_agpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $agpr0_agpr1
+ ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY renamable $vgpr2_vgpr3_vgpr4_vgpr5
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = COPY killed renamable $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = COPY renamable $agpr0_agpr1_agpr2_agpr3
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr10_vgpr11_vgpr12_vgpr13, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $vgpr14_vgpr15_vgpr16_vgpr17, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $vgpr2_vgpr3_vgpr4_vgpr5, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ ; No VGPRs available for %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ S_ENDPGM 0
+
+...
+
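+# Tied MFMA where the def and src2 are the same sub0-sub15 subrange of
+# a vreg_1024.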
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr10 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: renamable $vgpr11 = COPY renamable $vgpr10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr14_vgpr15_vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_1024_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ %0.sub0_sub1_sub2_sub3:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ %0.sub4_sub5_sub6_sub7:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ %0.sub8_sub9_sub10_sub11:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ %0.sub12_sub13_sub14_sub15:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
+ %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15:vreg_1024_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %0
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %2, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
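+# Chained MFMAs where the second result feeds an inline asm VGPR use,
+# so it must stay in VGPRs; the first result is inflated to AGPRs only
+# after the loop.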
+---
+name: chained_mfma_dst_user_is_vgpr
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: chained_mfma_dst_user_is_vgpr
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, killed renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1(0x80000000)
+
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $vcc
+
+ undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec
+ early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %4
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# TODO: In this trivial case, the single copy required would be cheaper
+# than the tuple copy.
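+# (Only %4.sub0 is read in the loop, by the V_CVT_F16_F32.)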
+---
+name: chained_mfma_dst_user_is_vgpr_small_subreg
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: chained_mfma_dst_user_is_vgpr_small_subreg
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: dead renamable $vgpr0 = nofpexcept V_CVT_F16_F32_e32 killed $vgpr0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1(0x80000000)
+
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $vcc
+
+ undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec
+ early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %5:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 %4.sub0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %0.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Transitive user of the register is an MFMA with non-register src2
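+# (src2 of the V_MFMA_F32_4X4X4F16 below is the immediate 0.)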
+---
+name: chained_mfma_dst_user_has_imm_src2
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: chained_mfma_dst_user_has_imm_src2
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
@@ -830,7 +1564,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 killed $vgpr4_vgpr5, $vgpr8_vgpr9, undef $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr20_vgpr21_vgpr22_vgpr23 = V_MFMA_F32_4X4X4F16_vgprcd_e64 $vgpr20_vgpr21, $vgpr18_vgpr19, 0, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
@@ -853,6 +1588,8 @@ body: |
; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
bb.0:
+ successors: %bb.1(0x80000000)
+
S_NOP 0, implicit-def $agpr0
renamable $sgpr0 = S_MOV_B32 0
undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
@@ -862,16 +1599,104 @@ body: |
%0.sub9:vreg_512_align2 = COPY %0.sub8
bb.1:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
liveins: $vcc
- undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %3.sub4_sub5, %3.sub8_sub9, undef %6:vreg_512_align2, 0, 0, 0, implicit $mode, implicit $exec
+ undef %2.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ early-clobber %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %2, 0, 0, 0, implicit $mode, implicit $exec
+ early-clobber %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %4.sub0_sub1_sub2_sub3:vreg_512_align2 = V_MFMA_F32_4X4X4F16_vgprcd_e64 %4.sub0_sub1, %1, 0, 0, 0, 0, implicit $mode, implicit $exec
+
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
bb.2:
- ; No VGPRs available for %0 or %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %6, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
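+# Tied mac MFMA whose result is consumed as src2 of an untied MFMA in
+# the same loop.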
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %4
S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
@@ -889,10 +1714,8 @@ body: |
...
-# Chain of 2 untied cases, but the second mfma is a different size and
-# uses a subregister.
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user_with_vgpr_use
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -900,7 +1723,7 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_chain_subreg
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_has_untied_user_with_vgpr_use
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
@@ -908,26 +1731,27 @@ body: |
; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
- ; CHECK-NEXT: renamable $vgpr18_vgpr19 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vgpr16_vgpr17 = COPY killed renamable $sgpr0_sgpr1
; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK-NEXT: liveins: $vcc, $vgpr18_vgpr19
+ ; CHECK-NEXT: liveins: $vcc, $vgpr16_vgpr17
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- ; CHECK-NEXT: early-clobber renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31, 0, 0, 0, implicit $mode, implicit $exec
- ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = V_MFMA_F32_16X16X16F16_vgprcd_e64 $vgpr18_vgpr19, $vgpr18_vgpr19, killed $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr16_vgpr17, $vgpr16_vgpr17, killed $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3
+ ; CHECK-NEXT: liveins: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3 = COPY killed renamable $vgpr0_vgpr1_vgpr2_vgpr3
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
@@ -935,7 +1759,10 @@ body: |
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
- ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
; CHECK-NEXT: S_ENDPGM 0
bb.0:
S_NOP 0, implicit-def $agpr0
@@ -949,14 +1776,15 @@ body: |
bb.1:
liveins: $vcc
- undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %2:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
- %3:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
- %4:vreg_128_align2 = V_MFMA_F32_16X16X16F16_vgprcd_e64 %1, %1, %3.sub2_sub3_sub4_sub5, 0, 0, 0, implicit $mode, implicit $exec
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 27983881 /* reguse:VReg_512_Align2 */, %4
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
bb.2:
- ; No VGPRs available for %0 or %4
+ ; No VGPRs available for %4
S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
@@ -966,13 +1794,16 @@ body: |
S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
%5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
- GLOBAL_STORE_DWORDX4_SADDR %5, %4, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
S_ENDPGM 0
...
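+
+# Untied mfma whose result is consumed as the tied src2 accumulator of a
+# following mac mfma.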
---
-name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_has_tied_user
tracksRegLiveness: true
machineFunctionInfo:
isEntryFunction: true
@@ -980,32 +1811,115 @@ machineFunctionInfo:
occupancy: 10
sgprForEXECCopy: '$sgpr100_sgpr101'
body: |
- ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_mac_vgprcd_e64_same_subreg
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_has_tied_user
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
- ; CHECK-NEXT: renamable $vgpr10 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
+ ; CHECK-NEXT: renamable $vgpr2_vgpr3 = COPY killed renamable $sgpr0_sgpr1
+ ; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: liveins: $vcc, $vgpr2_vgpr3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $vgpr0_vgpr1 = GLOBAL_LOAD_DWORDX2 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ ; CHECK-NEXT: early-clobber renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr2_vgpr3, $vgpr2_vgpr3, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr2_vgpr3, $vgpr2_vgpr3, killed $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: liveins: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19:0x00000000FFFFFFFF
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ ; CHECK-NEXT: S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ ; CHECK-NEXT: renamable $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr8_agpr9_agpr10_agpr11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr12_agpr13_agpr14_agpr15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR renamable $vgpr0, renamable $agpr0_agpr1_agpr2_agpr3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ ; CHECK-NEXT: GLOBAL_STORE_DWORDX4_SADDR killed renamable $vgpr0, killed renamable $agpr4_agpr5_agpr6_agpr7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ ; CHECK-NEXT: S_ENDPGM 0
+ bb.0:
+ S_NOP 0, implicit-def $agpr0
+ renamable $sgpr0 = S_MOV_B32 0
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
+ renamable $sgpr1 = COPY renamable $sgpr0
+ %1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
+ renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
+
+ bb.1:
+ liveins: $vcc
+
+ undef %0.sub0_sub1:vreg_512_align2 = GLOBAL_LOAD_DWORDX2 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s64), addrspace 1)
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, %0, 0, 0, 0, implicit $mode, implicit $exec
+ %4:vreg_512_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %4, 0, 0, 0, implicit $mode, implicit $exec
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ ; No VGPRs available for %4
+ S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ S_NOP 0, implicit-def $vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
+ S_NOP 0, implicit-def $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23
+ S_NOP 0, implicit-def $vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+ S_NOP 0, implicit-def $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39
+ S_NOP 0, implicit-def $vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47
+ S_NOP 0, implicit-def $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55
+ S_NOP 0, implicit-def $vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
+ %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub8_sub9_sub10_sub11, undef $sgpr0_sgpr1, 32, 0, implicit $exec :: (store (s128), align 32, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub12_sub13_sub14_sub15, undef $sgpr0_sgpr1, 48, 0, implicit $exec :: (store (s128), addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub0_sub1_sub2_sub3, undef $sgpr0_sgpr1, 0, 0, implicit $exec :: (store (s128), align 128, addrspace 1)
+ GLOBAL_STORE_DWORDX4_SADDR %5, %4.sub4_sub5_sub6_sub7, killed undef $sgpr0_sgpr1, 16, 0, implicit $exec :: (store (s128), addrspace 1)
+ S_ENDPGM 0
+
+...
+
+# Non-mac variant where src2 is an immediate.
+---
+name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_imm_src2
+tracksRegLiveness: true
+machineFunctionInfo:
+ isEntryFunction: true
+ stackPtrOffsetReg: '$sgpr32'
+ occupancy: 10
+ sgprForEXECCopy: '$sgpr100_sgpr101'
+body: |
+ ; CHECK-LABEL: name: inflate_result_to_agpr__V_MFMA_F32_32X32X8F16_vgprcd_e64_imm_src2
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: S_NOP 0, implicit-def $agpr0
+ ; CHECK-NEXT: renamable $sgpr0 = S_MOV_B32 0
+ ; CHECK-NEXT: renamable $vgpr8 = V_MOV_B32_e32 0, implicit $exec
; CHECK-NEXT: renamable $sgpr1 = COPY renamable $sgpr0
; CHECK-NEXT: renamable $vgpr0_vgpr1 = COPY killed renamable $sgpr0_sgpr1
; CHECK-NEXT: renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
- ; CHECK-NEXT: renamable $vgpr11 = COPY renamable $vgpr10
+ ; CHECK-NEXT: dead renamable $vgpr9 = COPY renamable $vgpr8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
; CHECK-NEXT: liveins: $vcc, $vgpr0_vgpr1
; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: renamable $vgpr6_vgpr7_vgpr8_vgpr9 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: renamable $vgpr10_vgpr11_vgpr12_vgpr13 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: renamable $vgpr14_vgpr15_vgpr16_vgpr17 = GLOBAL_LOAD_DWORDX4 undef renamable $vgpr0_vgpr1, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- ; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: early-clobber renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17 = V_MFMA_F32_32X32X8F16_vgprcd_e64 $vgpr0_vgpr1, $vgpr0_vgpr1, 0, 0, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
; CHECK-NEXT: S_BRANCH %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
- ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33:0x00000000FFFFFFFF
+ ; CHECK-NEXT: liveins: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17:0x00000000FFFFFFFF
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = COPY killed renamable $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17
; CHECK-NEXT: S_NOP 0, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
@@ -1025,20 +1939,16 @@ body: |
bb.0:
S_NOP 0, implicit-def $agpr0
renamable $sgpr0 = S_MOV_B32 0
- undef %0.sub8:vreg_1024_align2 = V_MOV_B32_e32 0, implicit $exec
+ undef %0.sub8:vreg_512_align2 = V_MOV_B32_e32 0, implicit $exec
renamable $sgpr1 = COPY renamable $sgpr0
%1:vreg_64_align2 = COPY killed renamable $sgpr0_sgpr1
renamable $vcc = S_AND_B64 $exec, -1, implicit-def dead $scc
- %0.sub9:vreg_1024_align2 = COPY %0.sub8
+ %0.sub9:vreg_512_align2 = COPY %0.sub8
bb.1:
liveins: $vcc
- %0.sub0_sub1_sub2_sub3:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- %0.sub4_sub5_sub6_sub7:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- %0.sub8_sub9_sub10_sub11:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- %0.sub12_sub13_sub14_sub15:vreg_1024_align2 = GLOBAL_LOAD_DWORDX4 undef %3:vreg_64_align2, 0, 0, implicit $exec :: (load (s128), addrspace 1)
- %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15:vreg_1024_align2 = V_MFMA_F32_32X32X8F16_mac_vgprcd_e64 %1, %1, %0.sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7_sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15, 0, 0, 0, implicit $mode, implicit $exec
+ %0:vreg_512_align2 = V_MFMA_F32_32X32X8F16_vgprcd_e64 %1, %1, 0, 0, 0, 0, implicit $mode, implicit $exec
S_CBRANCH_VCCNZ %bb.1, implicit $vcc
S_BRANCH %bb.2
diff --git a/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll b/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll
new file mode 100644
index 0000000..005c8c8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/integer-canonicalizing-src-modifiers.ll
@@ -0,0 +1,93 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
+
+; Demonstrate that folding bitmask operations on the sign bit of an integer into
+; source modifiers (srcmods) does not apply to canonicalizing instructions.
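+;
+; For contrast, a minimal sketch (the folding cases themselves are exercised in
+; integer-select-src-modifiers.ll): a non-canonicalizing user such as a select,
+;   %neg.a = xor i32 %a, u0x80000000
+;   %select = select i1 %cmp, i32 %neg.a, i32 %b
+; can absorb the mask as an fneg source modifier (-v1 on v_cndmask_b32_e64),
+; whereas the uitofp cases below must keep the explicit v_and_b32.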
+
+define double @v_uitofp_i32_to_f64_abs(i32 %arg0) nounwind {
+; GCN-LABEL: v_uitofp_i32_to_f64_abs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_uitofp_i32_to_f64_abs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %arg0.abs = and i32 %arg0, u0x7fffffff
+ %cvt = uitofp i32 %arg0.abs to double
+ ret double %cvt
+}
+
+define double @v_uitofp_i32_to_f64_neg(i32 %arg0) nounwind {
+; GCN-LABEL: v_uitofp_i32_to_f64_neg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_uitofp_i32_to_f64_neg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_and_b32_e32 v0, 0x80000000, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], v0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %arg0.neg = and i32 %arg0, u0x80000000
+ %cvt = uitofp i32 %arg0.neg to double
+ ret double %cvt
+}
+
+define double @s_uitofp_i32_to_f64_abs(i32 inreg %arg0) nounwind {
+; GCN-LABEL: s_uitofp_i32_to_f64_abs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_bitset0_b32 s16, 31
+; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], s16
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_uitofp_i32_to_f64_abs:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset0_b32 s0, 31
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %arg0.abs = and i32 %arg0, u0x7fffffff
+ %cvt = uitofp i32 %arg0.abs to double
+ ret double %cvt
+}
+
+define double @s_uitofp_i32_to_f64_neg(i32 inreg %arg0) nounwind {
+; GCN-LABEL: s_uitofp_i32_to_f64_neg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s4, s16, 0x80000000
+; GCN-NEXT: v_cvt_f64_u32_e32 v[0:1], s4
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_uitofp_i32_to_f64_neg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_and_b32 s0, s0, 0x80000000
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %arg0.neg = and i32 %arg0, u0x80000000
+ %cvt = uitofp i32 %arg0.neg to double
+ ret double %cvt
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX11-FAKE16: {{.*}}
+; GFX11-TRUE16: {{.*}}
+; GFX7: {{.*}}
+; GFX9: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll b/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll
new file mode 100644
index 0000000..b3c7ac8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/integer-select-src-modifiers.ll
@@ -0,0 +1,1011 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx700 < %s | FileCheck -check-prefixes=GCN,GFX7 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
+
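+; Test folding of bitmask operations on the sign bit of integers (integer
+; fneg/fabs/fneg+fabs patterns: xor/and/or with the sign-bit mask) into VALU
+; source modifiers, e.g. on the operands of v_cndmask_b32.
+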
+define i32 @fneg_select_i32_1(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_select_i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v2, -v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, -v1, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %b
+ ret i32 %select
+}
+
+define i32 @fneg_select_i32_2(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_select_i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v1, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v1, v2, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %b, i32 %neg.a
+ ret i32 %select
+}
+
+define i32 @fneg_select_i32_both(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_select_i32_both:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, -v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i32_both:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, -v1, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %neg.b = xor i32 %b, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %neg.b
+ ret i32 %select
+}
+
+define i32 @fneg_1_fabs_2_select_i32(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_1_fabs_2_select_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, |v1|, -v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_1_fabs_2_select_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, |v1|, -v1, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %abs.b = and i32 %a, u0x7fffffff
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %abs.b
+ ret i32 %select
+}
+
+define i32 @s_fneg_select_i32_1(i32 inreg %cond, i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: s_fneg_select_i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s4, s17, 0x80000000
+; GCN-NEXT: s_cmp_eq_u32 s16, 0
+; GCN-NEXT: s_cselect_b32 s4, s4, s18
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_select_i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s1, s1, 0x80000000
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, s1, s2
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %b
+ ret i32 %select
+}
+
+define i32 @s_fneg_1_fabs_2_select_i32(i32 inreg %cond, i32 %a, i32 %b) {
+; GCN-LABEL: s_fneg_1_fabs_2_select_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_cmp_eq_u32 s16, 0
+; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GCN-NEXT: v_cndmask_b32_e64 v0, |v0|, -v0, s[4:5]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_1_fabs_2_select_i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, |v0|, -v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i32 %a, u0x80000000
+ %abs.b = and i32 %a, u0x7fffffff
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %abs.b
+ ret i32 %select
+}
+
+define <2 x i32> @fneg_select_v2i32_1(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_select_v2i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v4, -v2, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_v2i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, -v2, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b
+ ret <2 x i32> %select
+}
+
+define <2 x i32> @fneg_select_v2i32_2(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_select_v2i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, v4, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_v2i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, v4, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a
+ ret <2 x i32> %select
+}
+
+define i32 @fabs_select_i32_1(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fabs_select_i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v2, |v1|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fabs_select_i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, |v1|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %abs.a = and i32 %a, u0x7fffffff
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %abs.a, i32 %b
+ ret i32 %select
+}
+
+define i32 @fabs_select_i32_2(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fabs_select_i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, |v1|, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fabs_select_i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, |v1|, v2, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %abs.a = and i32 %a, u0x7fffffff
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %b, i32 %abs.a
+ ret i32 %select
+}
+
+define <2 x i32> @fneg_1_fabs_2_select_v2i32(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_1_fabs_2_select_v2i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -v2, |v2|, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, |v3|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_1_fabs_2_select_v2i32:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -v2, |v2|, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, |v3|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000)
+ %abs.b = and <2 x i32> %a, splat (i32 u0x7fffffff)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %abs.b, <2 x i32> %neg.a
+ ret <2 x i32> %select
+}
+
+define i32 @fneg_fabs_select_i32_1(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_fabs_select_i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v2, -|v1|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, -|v1|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %neg.a, i32 %b
+ ret i32 %select
+}
+
+define i32 @fneg_fabs_select_i32_2(i32 %cond, i32 %a, i32 %b) {
+; GCN-LABEL: fneg_fabs_select_i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -|v1|, v2, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -|v1|, v2, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i32 %a, u0x80000000
+ %cmp = icmp eq i32 %cond, zeroinitializer
+ %select = select i1 %cmp, i32 %b, i32 %neg.a
+ ret i32 %select
+}
+
+define <2 x i32> @fneg_fabs_select_v2i32_1(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_fabs_select_v2i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, v4, -|v2|, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_v2i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, -|v2|, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b
+ ret <2 x i32> %select
+}
+
+define <2 x i32> @fneg_fabs_select_v2i32_2(<2 x i32> %cond, <2 x i32> %a, <2 x i32> %b) {
+; GCN-LABEL: fneg_fabs_select_v2i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e64 v0, -|v2|, v4, vcc
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_v2i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, -|v2|, v4, vcc_lo
+; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a
+ ret <2 x i32> %select
+}
+
+define <2 x i32> @s_fneg_select_v2i32_1(<2 x i32> inreg %cond, <2 x i32> inreg %a, <2 x i32> inreg %b) {
+; GCN-LABEL: s_fneg_select_v2i32_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_xor_b32 s4, s19, 0x80000000
+; GCN-NEXT: s_xor_b32 s5, s18, 0x80000000
+; GCN-NEXT: s_cmp_eq_u32 s16, 0
+; GCN-NEXT: s_cselect_b32 s5, s5, s20
+; GCN-NEXT: s_cmp_eq_u32 s17, 0
+; GCN-NEXT: s_cselect_b32 s4, s4, s21
+; GCN-NEXT: v_mov_b32_e32 v0, s5
+; GCN-NEXT: v_mov_b32_e32 v1, s4
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_select_v2i32_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000
+; GFX11-NEXT: s_xor_b32 s2, s2, 0x80000000
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cmp_eq_u32 s1, 0
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %neg.a, <2 x i32> %b
+ ret <2 x i32> %select
+}
+
+define <2 x i32> @s_fneg_fabs_select_v2i32_2(<2 x i32> inreg %cond, <2 x i32> inreg %a, <2 x i32> inreg %b) {
+; GCN-LABEL: s_fneg_fabs_select_v2i32_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_bitset1_b32 s19, 31
+; GCN-NEXT: s_bitset1_b32 s18, 31
+; GCN-NEXT: s_cmp_eq_u32 s16, 0
+; GCN-NEXT: s_cselect_b32 s4, s20, s18
+; GCN-NEXT: s_cmp_eq_u32 s17, 0
+; GCN-NEXT: s_cselect_b32 s5, s21, s19
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_fabs_select_v2i32_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset1_b32 s3, 31
+; GFX11-NEXT: s_bitset1_b32 s2, 31
+; GFX11-NEXT: s_cmp_eq_u32 s0, 0
+; GFX11-NEXT: s_cselect_b32 s0, s16, s2
+; GFX11-NEXT: s_cmp_eq_u32 s1, 0
+; GFX11-NEXT: s_cselect_b32 s1, s17, s3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or <2 x i32> %a, splat (i32 u0x80000000)
+ %cmp = icmp eq <2 x i32> %cond, zeroinitializer
+ %select = select <2 x i1> %cmp, <2 x i32> %b, <2 x i32> %neg.a
+ ret <2 x i32> %select
+}
+
+define i64 @fneg_select_i64_1(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_select_i64_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @fneg_select_i64_2(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_select_i64_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -v3, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i64 @fneg_1_fabs_2_select_i64(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_1_fabs_2_select_i64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, |v5|, -v3, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_1_fabs_2_select_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, |v5|, -v3, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %abs.b = and i64 %b, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %abs.b
+ ret i64 %select
+}
+
+define i64 @fabs_select_i64_1(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fabs_select_i64_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, |v3|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fabs_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, |v3|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %abs.a = and i64 %a, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %abs.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @fabs_select_i64_2(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fabs_select_i64_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, |v3|, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fabs_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, |v3|, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %abs.a = and i64 %a, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %abs.a
+ ret i64 %select
+}
+
+define i64 @fneg_fabs_select_i64_1(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_fabs_select_i64_1:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -|v3|, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @fneg_fabs_select_i64_2(i64 %cond, i64 %a, i64 %b) {
+; GCN-LABEL: fneg_fabs_select_i64_2:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GCN-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: fneg_fabs_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, -|v3|, v5, vcc_lo
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i64 @s_fneg_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_select_i64_1:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s18, s20
+; GFX7-NEXT: s_cselect_b32 s5, s6, s21
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_select_i64_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s5, s18, s20
+; GFX9-NEXT: s_cselect_b32 s4, s4, s21
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @s_fneg_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_select_i64_2:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s20, s18
+; GFX7-NEXT: s_cselect_b32 s5, s21, s6
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_select_i64_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s5, s20, s18
+; GFX9-NEXT: s_cselect_b32 s4, s21, s4
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s16, s2
+; GFX11-NEXT: s_cselect_b32 s1, s17, s3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i64 @s_fneg_1_fabs_2_select_i64(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_1_fabs_2_select_i64:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_xor_b32 s6, s19, 0x80000000
+; GFX7-NEXT: s_bitset0_b32 s21, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s18, s20
+; GFX7-NEXT: s_cselect_b32 s5, s6, s21
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_1_fabs_2_select_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_xor_b32 s4, s19, 0x80000000
+; GFX9-NEXT: s_bitset0_b32 s21, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s5, s18, s20
+; GFX9-NEXT: s_cselect_b32 s4, s4, s21
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_1_fabs_2_select_i64:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_xor_b32 s3, s3, 0x80000000
+; GFX11-NEXT: s_bitset0_b32 s17, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i64 %a, u0x8000000000000000
+ %abs.b = and i64 %b, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %abs.b
+ ret i64 %select
+}
+
+define i64 @s_fabs_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fabs_select_i64_1:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_bitset0_b32 s19, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s18, s20
+; GFX7-NEXT: s_cselect_b32 s5, s19, s21
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fabs_select_i64_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_bitset0_b32 s19, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s4, s18, s20
+; GFX9-NEXT: s_cselect_b32 s5, s19, s21
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fabs_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset0_b32 s3, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %abs.a = and i64 %a, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %abs.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @s_fabs_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fabs_select_i64_2:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_bitset0_b32 s19, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s20, s18
+; GFX7-NEXT: s_cselect_b32 s5, s21, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fabs_select_i64_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_bitset0_b32 s19, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s4, s20, s18
+; GFX9-NEXT: s_cselect_b32 s5, s21, s19
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fabs_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset0_b32 s3, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s16, s2
+; GFX11-NEXT: s_cselect_b32 s1, s17, s3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %abs.a = and i64 %a, u0x7fffffffffffffff
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %abs.a
+ ret i64 %select
+}
+
+define i64 @s_fneg_fabs_select_i64_1(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_fabs_select_i64_1:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_bitset1_b32 s19, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s18, s20
+; GFX7-NEXT: s_cselect_b32 s5, s19, s21
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_fabs_select_i64_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_bitset1_b32 s19, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s4, s18, s20
+; GFX9-NEXT: s_cselect_b32 s5, s19, s21
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_fabs_select_i64_1:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset1_b32 s3, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s2, s16
+; GFX11-NEXT: s_cselect_b32 s1, s3, s17
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %neg.a, i64 %b
+ ret i64 %select
+}
+
+define i64 @s_fneg_fabs_select_i64_2(i64 inreg %cond, i64 inreg %a, i64 inreg %b) {
+; GFX7-LABEL: s_fneg_fabs_select_i64_2:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_cmp_eq_u64_e64 s[4:5], s[16:17], 0
+; GFX7-NEXT: s_bitset1_b32 s19, 31
+; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
+; GFX7-NEXT: s_cselect_b32 s4, s20, s18
+; GFX7-NEXT: s_cselect_b32 s5, s21, s19
+; GFX7-NEXT: v_mov_b32_e32 v0, s4
+; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_fneg_fabs_select_i64_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_bitset1_b32 s19, 31
+; GFX9-NEXT: s_cmp_eq_u64 s[16:17], 0
+; GFX9-NEXT: s_cselect_b32 s4, s20, s18
+; GFX9-NEXT: s_cselect_b32 s5, s21, s19
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_fneg_fabs_select_i64_2:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_bitset1_b32 s3, 31
+; GFX11-NEXT: s_cmp_eq_u64 s[0:1], 0
+; GFX11-NEXT: s_cselect_b32 s0, s16, s2
+; GFX11-NEXT: s_cselect_b32 s1, s17, s3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = or i64 %a, u0x8000000000000000
+ %cmp = icmp eq i64 %cond, zeroinitializer
+ %select = select i1 %cmp, i64 %b, i64 %neg.a
+ ret i64 %select
+}
+
+define i16 @fneg_select_i16_1(i16 %cond, i16 %a, i16 %b) {
+; GFX7-LABEL: fneg_select_i16_1:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: fneg_select_i16_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: fneg_select_i16_1:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l
+; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: fneg_select_i16_1:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i16 %a, u0x8000
+ %cmp = icmp eq i16 %cond, zeroinitializer
+ %select = select i1 %cmp, i16 %neg.a, i16 %b
+ ret i16 %select
+}
+
+define i16 @fneg_select_i16_2(i16 %cond, i16 %a, i16 %b) {
+; GFX7-LABEL: fneg_select_i16_2:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: fneg_select_i16_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: fneg_select_i16_2:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l
+; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v0.h, v2.l, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: fneg_select_i16_2:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v1, 0xffff8000, v1
+; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i16 %a, u0x8000
+ %cmp = icmp eq i16 %cond, zeroinitializer
+ %select = select i1 %cmp, i16 %b, i16 %neg.a
+ ret i16 %select
+}
+
+define i16 @fneg_select_i16_both(i16 %cond, i16 %a, i16 %b) {
+; GFX7-LABEL: fneg_select_i16_both:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX7-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: fneg_select_i16_both:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: fneg_select_i16_both:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v1.l, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_xor_b16 v0.l, 0x8000, v0.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: fneg_select_i16_both:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v0, 0xffff8000, v0
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i16 %a, u0x8000
+ %neg.b = xor i16 %b, u0x8000
+ %cmp = icmp eq i16 %cond, zeroinitializer
+ %select = select i1 %cmp, i16 %neg.a, i16 %neg.b
+ ret i16 %select
+}
+
+define i16 @fneg_1_fabs_2_select_i16(i16 %cond, i16 %a, i16 %b) {
+; GFX7-LABEL: fneg_1_fabs_2_select_i16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX7-NEXT: v_xor_b32_e32 v2, 0xffff8000, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0x7fff, v1
+; GFX7-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GFX7-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: fneg_1_fabs_2_select_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v2, 0xffff8000, v1
+; GFX9-NEXT: v_and_b32_e32 v1, 0x7fff, v1
+; GFX9-NEXT: v_cmp_eq_u16_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: fneg_1_fabs_2_select_i16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_xor_b16 v0.h, 0x8000, v1.l
+; GFX11-TRUE16-NEXT: v_and_b16 v1.l, 0x7fff, v1.l
+; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0.l
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: fneg_1_fabs_2_select_i16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_xor_b32_e32 v2, 0xffff8000, v1
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0x7fff, v1
+; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, 0, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %neg.a = xor i16 %a, u0x8000
+ %abs.b = and i16 %a, u0x7fff
+ %cmp = icmp eq i16 %cond, zeroinitializer
+ %select = select i1 %cmp, i16 %neg.a, i16 %abs.b
+ ret i16 %select
+}
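; A hand-written reference sketch (not part of this patch): the integer
; tests above mirror the following float pattern, where xor with the sign
; bit acts as fneg and and with the inverted sign bit acts as fabs, so the
; backend can fold them into source modifiers on the select operands.
define float @fneg_select_f32_sketch(i32 %cond, float %a, float %b) {
  %neg.a = fneg float %a               ; integer form: xor i32 %a, u0x80000000
  %cmp = icmp eq i32 %cond, 0
  %select = select i1 %cmp, float %neg.a, float %b
  ret float %select
}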
diff --git a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll
index 1c298014..3001248 100644
--- a/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll
+++ b/llvm/test/CodeGen/AMDGPU/issue130120-eliminate-frame-index.ll
@@ -6,16 +6,24 @@ define amdgpu_gfx [13 x i32] @issue130120() {
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: s_add_i32 s0, s32, 0xf0
-; CHECK-NEXT: s_add_i32 s1, s32, 0xf4
-; CHECK-NEXT: s_add_i32 s2, s32, 0xf8
-; CHECK-NEXT: s_add_i32 s3, s32, 0xfc
+; CHECK-NEXT: s_movk_i32 s1, 0xf4
+; CHECK-NEXT: s_movk_i32 s2, 0xf8
+; CHECK-NEXT: s_movk_i32 s3, 0xfc
+; CHECK-NEXT: s_movk_i32 s34, 0x100
; CHECK-NEXT: v_mov_b32_e32 v1, v0
-; CHECK-NEXT: s_add_i32 s34, s32, 0x100
-; CHECK-NEXT: s_add_i32 s35, s32, 0x104
-; CHECK-NEXT: s_add_i32 s36, s32, 0x108
-; CHECK-NEXT: s_add_i32 s37, s32, 0x110
-; CHECK-NEXT: s_add_i32 s38, s32, 0x120
+; CHECK-NEXT: s_movk_i32 s35, 0x104
+; CHECK-NEXT: s_movk_i32 s36, 0x108
+; CHECK-NEXT: s_movk_i32 s37, 0x110
+; CHECK-NEXT: s_movk_i32 s38, 0x120
+; CHECK-NEXT: s_add_i32 s0, s32, 0xf0
+; CHECK-NEXT: s_add_i32 s1, s32, s1
+; CHECK-NEXT: s_add_i32 s2, s32, s2
+; CHECK-NEXT: s_add_i32 s3, s32, s3
+; CHECK-NEXT: s_add_i32 s34, s32, s34
+; CHECK-NEXT: s_add_i32 s35, s32, s35
+; CHECK-NEXT: s_add_i32 s36, s32, s36
+; CHECK-NEXT: s_add_i32 s37, s32, s37
+; CHECK-NEXT: s_add_i32 s38, s32, s38
; CHECK-NEXT: s_or_b32 s39, s32, 4
; CHECK-NEXT: s_or_b32 s40, s32, 8
; CHECK-NEXT: s_or_b32 s41, s32, 12
diff --git a/llvm/test/CodeGen/AMDGPU/literal64.ll b/llvm/test/CodeGen/AMDGPU/literal64.ll
index 768c972..98691d3 100644
--- a/llvm/test/CodeGen/AMDGPU/literal64.ll
+++ b/llvm/test/CodeGen/AMDGPU/literal64.ll
@@ -67,24 +67,8 @@ define void @v_mov_b64_double(ptr addrspace(1) %ptr) {
; GCN: ; %bb.0:
; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_load_b64 v[4:5], v[0:1], off
-; GCN-NEXT: s_mov_b32 s0, 0
-; GCN-NEXT: .LBB6_1: ; %atomicrmw.start
-; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GCN-NEXT: v_add_f64_e32 v[2:3], lit64(0x4063233333333333), v[4:5]
-; GCN-NEXT: global_atomic_cmpswap_b64 v[2:3], v[0:1], v[2:5], off th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
-; GCN-NEXT: s_wait_xcnt 0x0
-; GCN-NEXT: v_mov_b64_e32 v[4:5], v[2:3]
-; GCN-NEXT: s_or_b32 s0, vcc_lo, s0
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GCN-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; GCN-NEXT: s_cbranch_execnz .LBB6_1
-; GCN-NEXT: ; %bb.2: ; %atomicrmw.end
-; GCN-NEXT: s_or_b32 exec_lo, exec_lo, s0
+; GCN-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4063233333333333)
+; GCN-NEXT: global_atomic_add_f64 v[0:1], v[2:3], off scope:SCOPE_SYS
; GCN-NEXT: s_set_pc_i64 s[30:31]
%result = atomicrmw fadd ptr addrspace(1) %ptr, double 153.1 monotonic
ret void
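; For reference, a hand-written sketch (assumed, not taken from this patch)
; of the compare-and-swap expansion the deleted loop implemented before the
; target gained a native f64 atomic add; 0x4063233333333333 is the bit
; pattern of the double 153.1 seen in the old lit64() operand.
define void @expanded_atomic_fadd(ptr addrspace(1) %ptr) {
entry:
  %init = load double, ptr addrspace(1) %ptr, align 8
  br label %loop

loop:                                             ; the old .LBB6_1
  %loaded = phi double [ %init, %entry ], [ %new.val, %loop ]
  %sum = fadd double %loaded, 0x4063233333333333  ; 153.1
  %sum.cast = bitcast double %sum to i64
  %loaded.cast = bitcast double %loaded to i64
  %pair = cmpxchg ptr addrspace(1) %ptr, i64 %loaded.cast, i64 %sum.cast monotonic monotonic, align 8
  %ok = extractvalue { i64, i1 } %pair, 1
  %new.int = extractvalue { i64, i1 } %pair, 0
  %new.val = bitcast i64 %new.int to double
  br i1 %ok, label %end, label %loop

end:                                              ; the old atomicrmw.end
  ret void
}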
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
index 7a20b5c..a2c1545 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.atomic.buffer.load.ll
@@ -1,27 +1,52 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
define amdgpu_kernel void @raw_atomic_buffer_load_i32(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB0_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB0_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB0_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -34,23 +59,42 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_i32_off(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i32_off:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB1_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB1_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i32_off:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB1_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i32_off:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB1_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB1_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -62,23 +106,43 @@ bb2:
ret void
}
define amdgpu_kernel void @raw_atomic_buffer_load_i32_soff(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i32_soff:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB2_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB2_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i32_soff:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB2_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i32_soff:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_mov_b32 s5, 4
+; GFX12-NEXT: .LBB2_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], s5 offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB2_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -90,23 +154,42 @@ bb2:
ret void
}
define amdgpu_kernel void @raw_atomic_buffer_load_i32_dlc(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i32_dlc:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB3_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB3_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i32_dlc:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB3_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i32_dlc:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB3_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT_RT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB3_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -119,24 +202,44 @@ bb2:
}
define amdgpu_kernel void @raw_nonatomic_buffer_load_i32(<4 x i32> %addr) {
-; CHECK-LABEL: raw_nonatomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: .LBB4_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_or_b32 s0, s1, s0
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execnz .LBB4_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_nonatomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: .LBB4_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_nonatomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: .LBB4_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -149,23 +252,43 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_i64(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_i64:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB5_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB5_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_i64:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB5_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_i64:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB5_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%id.zext = zext i32 %id to i64
@@ -179,23 +302,42 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_v2i16(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_v2i16:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB6_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB6_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_v2i16:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB6_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_v2i16:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB6_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -209,68 +351,151 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_v4i16(<4 x i32> %addr) {
-; CHECK-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
-; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
-; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
-; CHECK-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
-; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-SDAG-TRUE16-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
-; CHECK-FAKE16-LABEL: raw_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: .LBB7_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX11-FAKE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
;
-; CHECK-GISEL-LABEL: raw_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: .LBB7_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v1
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v2
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX11-GISEL-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: .LBB7_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-SDAG-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-FAKE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: raw_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -285,23 +510,42 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_v4i32(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB8_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB8_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB8_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b128 v[2:5], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -315,25 +559,46 @@ bb2:
}
define amdgpu_kernel void @raw_atomic_buffer_load_ptr(<4 x i32> %addr) {
-; CHECK-LABEL: raw_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v1, v[1:2]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v1, v[1:2]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v1, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
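; A hand-written sketch (reconstructed from the RUN lines and checks above;
; the intrinsic signature is assumed) of the loop each of these kernels
; runs: re-issue the atomic buffer load until the value stops matching the
; lane id, which is why every check block above is a single self-loop.
declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32>, i32, i32, i32 immarg)

define amdgpu_kernel void @raw_atomic_buffer_load_sketch(<4 x i32> %addr) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1
bb1:
  %load = call i32 @llvm.amdgcn.raw.atomic.buffer.load.i32(<4 x i32> %addr, i32 0, i32 0, i32 1)
  %cmp = icmp eq i32 %load, %id
  br i1 %cmp, label %bb1, label %bb2  ; loop while the loaded value equals the id
bb2:
  ret void
}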
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll
index 5c0e34c..d51e912 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.atomic.fadd.ll
@@ -1,58 +1,95 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=CHECK %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX12 %s
define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen offset:24
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen offset:24
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_add_nc_u32_e32 v1, 24, v1
+; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 24
%ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
}
define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(float %val, <4 x i32> inreg %rsrc, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_add_f32 v0, off, s[16:19], s20
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_add_f32 v0, off, s[16:19], s20
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 0, i32 %soffset, i32 0)
ret void
}
define void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[16:19], s20 offen
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[16:19], s20 offen
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
define void @raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset(<2 x half> %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_pk_add_f16 v0, off, s[16:19], s20 offset:92
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_pk_add_f16 v0, off, s[16:19], s20 offset:92
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.buffer.atomic.fadd.v2f16(<2 x half> %val, <4 x i32> %rsrc, i32 92, i32 %soffset, i32 0)
ret void
}
define void @raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc(float %val, <4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
-; CHECK-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; CHECK: ; %bb.0:
-; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen slc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: buffer_atomic_add_f32 v0, v1, s[16:19], s20 offen slc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.buffer.atomic.fadd.f32(float %val, <4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 2)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll
index 8a6594f..1a1a1f7 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.load.tfe.ll
@@ -6,6 +6,7 @@
; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefixes=GFX910,GFX10
; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX11
; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12
+; RUN: llc -mcpu=gfx1250 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12
define amdgpu_ps void @raw_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
; GFX67-LABEL: raw_buffer_load_i8_tfe:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
index 89511de..eeea1456 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.buffer.store.ll
@@ -3,6 +3,7 @@
; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck -check-prefixes=GFX68,GFX8 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 | FileCheck -check-prefixes=GFX11 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1200 | FileCheck -check-prefixes=GFX12 %s
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 | FileCheck -check-prefixes=GFX12 %s
define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <4 x float>) {
; GFX68-LABEL: buffer_store:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
index 561ec7d..6f7c001 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.atomic.buffer.load.ll
@@ -1,27 +1,52 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
-; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -global-isel=1 -new-reg-bank-select | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=0 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -global-isel=1 | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
define amdgpu_kernel void @raw_ptr_atomic_buffer_ptr_load_i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_ptr_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB0_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_ptr_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB0_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_ptr_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB0_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -34,23 +59,42 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_off:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB1_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB1_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_off:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB1_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_off:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB1_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB1_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -62,23 +106,43 @@ bb2:
ret void
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_soff:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB2_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB2_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_soff:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB2_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 4 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_soff:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_mov_b32 s5, 4
+; GFX12-NEXT: .LBB2_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], s5 offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB2_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -90,23 +154,42 @@ bb2:
ret void
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_i32_dlc:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB3_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB3_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_i32_dlc:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB3_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 dlc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_i32_dlc:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB3_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT_RT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB3_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -119,24 +202,44 @@ bb2:
}
define amdgpu_kernel void @raw_nonptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_nonptr_atomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: .LBB4_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_or_b32 s0, s1, s0
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execnz .LBB4_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_nonptr_atomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: .LBB4_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_nonptr_atomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: .LBB4_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -149,23 +252,43 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_i64:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB5_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB5_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_i64:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB5_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b64 v[2:3], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_i64:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB5_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%id.zext = zext i32 %id to i64
@@ -179,23 +302,42 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_v2i16:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB6_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB6_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_v2i16:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB6_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_v2i16:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB6_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v1, off, s[0:3], null th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -209,68 +351,151 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr) {
-; CHECK-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
-; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
-; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
-; CHECK-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
-; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-SDAG-TRUE16-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
;
-; CHECK-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: .LBB7_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX11-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
;
-; CHECK-GISEL-LABEL: raw_ptr_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: .LBB7_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v1
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v2
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX11-GISEL-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v1.h, v2.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: .LBB7_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v2
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
+;
+; GFX12-SDAG-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX12-FAKE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v1, v3, 16, v1
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: raw_ptr_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: .LBB7_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -285,23 +510,42 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB8_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB8_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b128 v[1:4], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v4, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB8_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b128 v[2:5], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -315,25 +559,46 @@ bb2:
}
define amdgpu_kernel void @raw_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: raw_ptr_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v1, v[1:2]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: raw_ptr_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b64 v[1:2], off, s[0:3], 0 offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v1, v[1:2]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: raw_ptr_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[2:3], off, s[0:3], null offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v1, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
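;
; The kernels in this file all follow the same shape: spin on an atomic buffer
; load until the loaded value equals the workitem id, so the GFX11/GFX12 check
; lines above are variations of one loop. A minimal sketch of that IR pattern,
; with illustrative operand values rather than values copied from any one test
; above:

declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i32(ptr addrspace(8), i32, i32, i32 immarg)

define amdgpu_kernel void @spin_until_id(ptr addrspace(8) %ptr) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1
bb1:
  ; the trailing operand is the cachepolicy; 1 (glc) is illustrative here and
  ; gives the coherent load the loop re-executes each iteration
  %load = call i32 @llvm.amdgcn.raw.ptr.atomic.buffer.load.i32(ptr addrspace(8) %ptr, i32 0, i32 0, i32 1)
  %done = icmp eq i32 %load, %id
  br i1 %done, label %bb2, label %bb1
bb2:
  ret void
}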
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll
index 8b6ba1a..2c3b521 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16.ll
@@ -1,104 +1,174 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; FIXME: Test 90a, 940. 908 should fail to select.
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 128
%ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret <2 x bfloat> %ret
}
define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 2)
ret <2 x bfloat> %ret
}
define void @raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen offset:128
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[0:3], s16 offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 128
%unused = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret void
}
define void @raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc(<2 x bfloat> %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset__slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%unused = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 2)
ret void
}
; Test waterfall loop
define <2 x bfloat> @raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset) #0 {
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: v_readfirstlane_b32 s3, v6
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
-; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v6
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s0, s0, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_bf16 v0, v5, s[4:7], s3 offen offset:128 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr6
-; GFX12-NEXT: ; implicit-def: $vgpr5
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB4_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: v_readfirstlane_b32 s3, v6
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
+; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v6
+; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_b32 s0, s0, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_bf16 v0, v5, s[4:7], s3 offen offset:128 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr6
+; GFX1200-NEXT: ; implicit-def: $vgpr5
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1200-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2bf16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v11, v4 :: v_dual_mov_b32 v10, v3
+; GFX1250-NEXT: v_dual_mov_b32 v9, v2 :: v_dual_mov_b32 v8, v1
+; GFX1250-NEXT: v_add_nc_u32_e32 v1, 0x80, v5
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v8
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v9
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v10
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v11
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v6
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[8:9]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[10:11]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v6
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_bf16 v0, v1, s[4:7], s3 offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9_vgpr10_vgpr11
+; GFX1250-NEXT: ; implicit-def: $vgpr6
+; GFX1250-NEXT: ; implicit-def: $vgpr1
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 128
%ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
ret <2 x bfloat> %ret
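;
; The waterfall loop checked above is how the backend handles a buffer
; descriptor that is not provably wave-uniform: v_readfirstlane_b32 copies one
; lane's rsrc and soffset into SGPRs, v_cmp plus s_and_saveexec masks execution
; to the lanes holding that same descriptor, the buffer atomic runs for those
; lanes, and s_xor/s_cbranch_execnz repeats until every lane has executed. A
; sketch of IR that forces this path (rsrc and soffset are plain VGPR arguments
; with no inreg, matching the waterfall test above):

declare <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat>, ptr addrspace(8), i32, i32, i32 immarg)

define <2 x bfloat> @waterfall_fadd(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset) {
  ; the +128 folds into the offset field on gfx1200 (offen offset:128) but is
  ; emitted as a separate v_add_nc_u32 on gfx1250, as the checks above show
  %voffset.add = add i32 %voffset, 128
  %ret = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2bf16(<2 x bfloat> %val, ptr addrspace(8) %rsrc, i32 %voffset.add, i32 %soffset, i32 0)
  ret <2 x bfloat> %ret
}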
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll
index 8141e0d..ea8f836 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_nortn.ll
@@ -2,7 +2,8 @@
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx908 < %s | FileCheck -check-prefix=GFX908 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
; GFX908-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -26,15 +27,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voff
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen scope:SCOPE_SYS
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24)
ret void
}
@@ -61,15 +69,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0)
ret void
}
@@ -96,15 +111,22 @@ define void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -131,15 +153,22 @@ define void @raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffs
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0)
ret void
}
@@ -166,15 +195,22 @@ define void @raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voff
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2)
ret void
}
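;
; Across this file the gfx1250 output differs from gfx1200 in two mechanical
; ways: the prologue needs only s_wait_loadcnt_dscnt and s_wait_kmcnt (no
; expcnt/samplecnt/bvhcnt waits), and returns use s_set_pc_i64 instead of
; s_setpc_b64. The IR under test is unchanged; a representative no-return
; caller, with cachepolicy values taken from the tests above:

declare float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float, ptr addrspace(8), i32, i32, i32 immarg)

define void @fadd_f32_noret(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
  ; in the checks above, cachepolicy 24 lowers to scope:SCOPE_SYS and 2 (slc)
  ; to th:TH_ATOMIC_NT; 0 takes the default path
  %unused = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24)
  ret void
}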
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll
index 767117d..2838740 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.atomic.fadd_rtn.ll
@@ -1,7 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %voffset, i32 inreg %soffset) #0 {
; GFX90A-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -18,16 +19,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffs
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN scope:SCOPE_SYS
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 24)
ret float %ret
}
@@ -47,16 +56,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset_
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, off, s[0:3], s16 th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 0, i32 %soffset, i32 0)
ret float %ret
}
@@ -76,16 +93,24 @@ define <2 x half> @raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgp
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 0)
ret <2 x half> %ret
}
@@ -105,16 +130,24 @@ define <2 x half> @raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_v
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, off, s[0:3], s16 offset:92 th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 92, i32 %soffset, i32 0)
ret <2 x half> %ret
}
@@ -134,16 +167,24 @@ define float @raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffs
; GFX942-NEXT: s_waitcnt vmcnt(0)
; GFX942-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: raw_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.raw.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %voffset, i32 %soffset, i32 2)
ret float %ret
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll
index 3540468..4dd258b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.load.bf16.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=GFX8 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck --check-prefix=GFX9 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck --check-prefix=GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX12 %s
define bfloat @raw_ptr_buffer_load_bf16(ptr addrspace(8) inreg %rsrc) {
; GFX7-LABEL: raw_ptr_buffer_load_bf16:
@@ -41,6 +42,14 @@ define bfloat @raw_ptr_buffer_load_bf16(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_u16 v0, off, s[0:3], 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_ptr_buffer_load_bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_u16 v0, off, s[0:3], null
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%val = call bfloat @llvm.amdgcn.raw.ptr.buffer.load.bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
ret bfloat %val
}
@@ -82,6 +91,14 @@ define <2 x bfloat> @raw_ptr_buffer_load_v2bf16(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_b32 v0, off, s[0:3], 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_ptr_buffer_load_v2bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v0, off, s[0:3], null
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%val = call <2 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v2bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
ret <2 x bfloat> %val
}
@@ -125,6 +142,14 @@ define <4 x bfloat> @raw_ptr_buffer_load_v4bf16(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_b64 v[0:1], off, s[0:3], 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_ptr_buffer_load_v4bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b64 v[0:1], off, s[0:3], null
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%val = call <4 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v4bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
ret <4 x bfloat> %val
}
@@ -178,6 +203,14 @@ define <8 x bfloat> @raw_ptr_buffer_load_v8bf16(ptr addrspace(8) inreg %rsrc) {
; GFX11-NEXT: buffer_load_b128 v[0:3], off, s[0:3], 0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: raw_ptr_buffer_load_v8bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b128 v[0:3], off, s[0:3], null
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: s_set_pc_i64 s[30:31]
%val = call <8 x bfloat> @llvm.amdgcn.raw.ptr.buffer.load.v8bf16(ptr addrspace(8) %rsrc, i32 0, i32 0, i32 0)
ret <8 x bfloat> %val
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll
index e1f84dc..ec7d7d4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.ptr.buffer.store.bf16.ll
@@ -3,7 +3,8 @@
; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck --check-prefix=GFX8 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck --check-prefix=GFX9 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 < %s | FileCheck --check-prefix=GFX10 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefixes=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX11 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1250 -amdgpu-enable-delay-alu=0 < %s | FileCheck --check-prefix=GFX12 %s
define amdgpu_ps void @buffer_store_bf16(ptr addrspace(8) inreg %rsrc, bfloat %data, i32 %offset) {
; GFX7-LABEL: buffer_store_bf16:
@@ -32,6 +33,11 @@ define amdgpu_ps void @buffer_store_bf16(ptr addrspace(8) inreg %rsrc, bfloat %d
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b16 v0, v1, s[0:3], null offen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.raw.ptr.buffer.store.bf16(bfloat %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
ret void
}
@@ -65,6 +71,11 @@ define amdgpu_ps void @buffer_store_v2bf16(ptr addrspace(8) inreg %rsrc, <2 x bf
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_v2bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null offen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.raw.ptr.buffer.store.v2bf16(<2 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
ret void
}
@@ -102,6 +113,11 @@ define amdgpu_ps void @buffer_store_v4bf16(ptr addrspace(8) inreg %rsrc, <4 x bf
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_v4bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null offen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.raw.ptr.buffer.store.v4bf16(<4 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
ret void
}
@@ -153,6 +169,11 @@ define amdgpu_ps void @buffer_store_v8bf16(ptr addrspace(8) inreg %rsrc, <8 x bf
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_v8bf16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null offen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.raw.ptr.buffer.store.v8bf16(<8 x bfloat> %data, ptr addrspace(8) %rsrc, i32 %offset, i32 0, i32 0)
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
index f6f614e..8896364 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.atomic.buffer.load.ll
@@ -1,30 +1,58 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
-; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
-; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
define amdgpu_kernel void @struct_atomic_buffer_load_i32(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB0_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB0_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB0_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -37,23 +65,43 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_i32_const_idx(<4 x i32> %addr) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32_const_idx:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB1_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB1_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32_const_idx:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB1_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32_const_idx:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 15
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB1_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB1_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -66,26 +114,48 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_i32_off(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32_off:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB2_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB2_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32_off:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB2_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32_off:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB2_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB2_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -98,26 +168,49 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_i32_soff(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32_soff:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB3_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB3_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32_soff:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB3_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32_soff:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_mov_b32 s5, 4
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB3_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], s5 idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB3_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -129,26 +222,48 @@ bb2:
ret void
}
define amdgpu_kernel void @struct_atomic_buffer_load_i32_dlc(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i32_dlc:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB4_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB4_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i32_dlc:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB4_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i32_dlc:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB4_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT_RT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -161,26 +276,49 @@ bb2:
}
define amdgpu_kernel void @struct_nonatomic_buffer_load_i32(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_nonatomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: .LBB5_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_or_b32 s0, s1, s0
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execnz .LBB5_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_nonatomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: .LBB5_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_nonatomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: .LBB5_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -193,26 +331,49 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_i64(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_i64:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v2, s6
-; CHECK-NEXT: .LBB6_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB6_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_i64:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v2, s6
+; GFX11-NEXT: .LBB6_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_i64:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v2, s6
+; GFX12-NEXT: .LBB6_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[0:1]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%id.zext = zext i32 %id to i64
@@ -226,26 +387,48 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_v2i16(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_v2i16:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB7_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_v2i16:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB7_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_v2i16:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB7_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -259,77 +442,172 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_v4i16(<4 x i32> %addr, i32 %index) {
-; CHECK-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
-; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
-; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1
-; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-SDAG-TRUE16-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX11-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v2
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v3
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
;
-; CHECK-FAKE16-LABEL: struct_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_clause 0x1
-; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
;
-; CHECK-GISEL-LABEL: struct_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_clause 0x1
-; CHECK-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-GISEL-NEXT: .LBB8_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v2
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v3
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX12-FAKE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_clause 0x1
+; GFX12-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: struct_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX12-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -344,26 +622,48 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_v4i32(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -377,28 +677,52 @@ bb2:
}
define amdgpu_kernel void @struct_atomic_buffer_load_ptr(<4 x i32> %addr, i32 %index) {
-; CHECK-LABEL: struct_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB10_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v2, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB10_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB10_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v2, v[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB10_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v2, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll
index 13b28d4..9abbc06 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.load.tfe.ll
@@ -6,6 +6,7 @@
; RUN: llc -mcpu=gfx1010 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefixes=GFX910,GFX10
; RUN: llc -mcpu=gfx1100 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX11
; RUN: llc -mcpu=gfx1200 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12
+; RUN: llc -mcpu=gfx1250 -mtriple=amdgcn-- < %s | FileCheck %s -check-prefix=GFX12
define amdgpu_ps void @struct_buffer_load_i8_tfe(<4 x i32> inreg %rsrc, ptr addrspace(1) %data_addr, ptr addrspace(1) %tfe_addr) {
; GFX67-LABEL: struct_buffer_load_i8_tfe:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll
index 9ce33c6..822016b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.buffer.store.ll
@@ -3,6 +3,8 @@
; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga | FileCheck -check-prefixes=GFX68,GFX8 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 | FileCheck -check-prefixes=GFX11,GFX11-TRUE16 %s
; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 | FileCheck -check-prefixes=GFX11,GFX11-FAKE16 %s
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
+; RUN: llc < %s -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <4 x float>) {
; GFX68-LABEL: buffer_store:
@@ -21,6 +23,15 @@ define amdgpu_ps void @buffer_store(<4 x i32> inreg, <4 x float>, <4 x float>, <
; GFX11-NEXT: buffer_store_b128 v[4:7], v12, s[0:3], 0 idxen glc
; GFX11-NEXT: buffer_store_b128 v[8:11], v12, s[0:3], 0 idxen slc
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_mov_b32_e32 v12, 0
+; GFX12-NEXT: s_clause 0x2
+; GFX12-NEXT: buffer_store_b128 v[0:3], v12, s[0:3], null idxen
+; GFX12-NEXT: buffer_store_b128 v[4:7], v12, s[0:3], null idxen th:TH_STORE_NT
+; GFX12-NEXT: buffer_store_b128 v[8:11], v12, s[0:3], null idxen th:TH_STORE_HT
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 0)
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 1)
@@ -40,6 +51,12 @@ define amdgpu_ps void @buffer_store_immoffs(<4 x i32> inreg, <4 x float>) {
; GFX11-NEXT: v_mov_b32_e32 v4, 0
; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 idxen offset:42
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_immoffs:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_mov_b32_e32 v4, 0
+; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen offset:42
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 42, i32 0, i32 0)
ret void
@@ -55,6 +72,11 @@ define amdgpu_ps void @buffer_store_idx(<4 x i32> inreg, <4 x float>, i32) {
; GFX11: ; %bb.0: ; %main_body
; GFX11-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_idx:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i32 0, i32 0)
ret void
@@ -76,6 +98,12 @@ define amdgpu_ps void @buffer_store_ofs(<4 x i32> inreg, <4 x float>, i32) {
; GFX11-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, s4
; GFX11-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], 0 idxen offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_ofs:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, 0
+; GFX12-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], null idxen offen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 0, i32 %2, i32 0, i32 0)
ret void
@@ -91,6 +119,11 @@ define amdgpu_ps void @buffer_store_both(<4 x i32> inreg, <4 x float>, i32, i32)
; GFX11: ; %bb.0: ; %main_body
; GFX11-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], 0 idxen offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_both:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: buffer_store_b128 v[0:3], v[4:5], s[0:3], null idxen offen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 %3, i32 0, i32 0)
ret void
@@ -108,6 +141,12 @@ define amdgpu_ps void @buffer_store_both_reversed(<4 x i32> inreg, <4 x float>,
; GFX11-NEXT: v_mov_b32_e32 v6, v4
; GFX11-NEXT: buffer_store_b128 v[0:3], v[5:6], s[0:3], 0 idxen offen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_both_reversed:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_dual_mov_b32 v6, v5 :: v_dual_mov_b32 v7, v4
+; GFX12-NEXT: buffer_store_b128 v[0:3], v[6:7], s[0:3], null idxen offen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %3, i32 %2, i32 0, i32 0)
ret void
@@ -139,6 +178,15 @@ define amdgpu_ps void @buffer_store_wait(<4 x i32> inreg, <4 x float>, i32, i32,
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: buffer_store_b128 v[0:3], v6, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_wait:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: buffer_store_b128 v[0:3], v4, s[0:3], null idxen
+; GFX12-NEXT: buffer_load_b128 v[0:3], v5, s[0:3], null idxen
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: buffer_store_b128 v[0:3], v6, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %1, <4 x i32> %0, i32 %2, i32 0, i32 0, i32 0)
%data = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %0, i32 %3, i32 0, i32 0, i32 0)
@@ -156,6 +204,11 @@ define amdgpu_ps void @buffer_store_x1(<4 x i32> inreg %rsrc, float %data, i32 %
; GFX11: ; %bb.0: ; %main_body
; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_x1:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.f32(float %data, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
@@ -171,6 +224,11 @@ define amdgpu_ps void @buffer_store_x2(<4 x i32> inreg %rsrc, <2 x float> %data,
; GFX11: ; %bb.0: ; %main_body
; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_x2:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v2f32(<2 x float> %data, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
@@ -193,6 +251,15 @@ define amdgpu_ps void @buffer_store_int(<4 x i32> inreg, <4 x i32>, <2 x i32>, i
; GFX11-NEXT: buffer_store_b64 v[4:5], v7, s[0:3], 0 idxen glc
; GFX11-NEXT: buffer_store_b32 v6, v7, s[0:3], 0 idxen slc
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: buffer_store_int:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_mov_b32_e32 v7, 0
+; GFX12-NEXT: s_clause 0x2
+; GFX12-NEXT: buffer_store_b128 v[0:3], v7, s[0:3], null idxen
+; GFX12-NEXT: buffer_store_b64 v[4:5], v7, s[0:3], null idxen th:TH_STORE_NT
+; GFX12-NEXT: buffer_store_b32 v6, v7, s[0:3], null idxen th:TH_STORE_HT
+; GFX12-NEXT: s_endpgm
main_body:
call void @llvm.amdgcn.struct.buffer.store.v4i32(<4 x i32> %1, <4 x i32> %0, i32 0, i32 0, i32 0, i32 0)
call void @llvm.amdgcn.struct.buffer.store.v2i32(<2 x i32> %2, <4 x i32> %0, i32 0, i32 0, i32 0, i32 1)
@@ -212,6 +279,12 @@ define amdgpu_ps void @struct_buffer_store_byte(<4 x i32> inreg %rsrc, float %v1
; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX11-NEXT: buffer_store_b8 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_byte:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GFX12-NEXT: buffer_store_b8 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
%v2 = fptoui float %v1 to i32
%v3 = trunc i32 %v2 to i8
@@ -237,6 +310,18 @@ define amdgpu_ps void @struct_buffer_store_f16(<4 x i32> inreg %rsrc, float %v1,
; GFX11-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
; GFX11-FAKE16-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 idxen
; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX12-TRUE16-LABEL: struct_buffer_store_f16:
+; GFX12-TRUE16: ; %bb.0:
+; GFX12-TRUE16-NEXT: v_cvt_f16_f32_e32 v0.l, v0
+; GFX12-TRUE16-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen
+; GFX12-TRUE16-NEXT: s_endpgm
+;
+; GFX12-FAKE16-LABEL: struct_buffer_store_f16:
+; GFX12-FAKE16: ; %bb.0:
+; GFX12-FAKE16-NEXT: v_cvt_f16_f32_e32 v0, v0
+; GFX12-FAKE16-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen
+; GFX12-FAKE16-NEXT: s_endpgm
%v2 = fptrunc float %v1 to half
call void @llvm.amdgcn.struct.buffer.store.f16(half %v2, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
@@ -261,6 +346,11 @@ define amdgpu_ps void @struct_buffer_store_v2f16(<4 x i32> inreg %rsrc, <2 x hal
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_v2f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.struct.buffer.store.v2f16(<2 x half> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
}
@@ -288,6 +378,11 @@ define amdgpu_ps void @struct_buffer_store_v4f16(<4 x i32> inreg %rsrc, <4 x hal
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_v4f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.struct.buffer.store.v4f16(<4 x half> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
}
@@ -304,6 +399,12 @@ define amdgpu_ps void @struct_buffer_store_i16(<4 x i32> inreg %rsrc, float %v1,
; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX11-NEXT: buffer_store_b16 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_i16:
+; GFX12: ; %bb.0: ; %main_body
+; GFX12-NEXT: v_cvt_u32_f32_e32 v0, v0
+; GFX12-NEXT: buffer_store_b16 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
main_body:
%v2 = fptoui float %v1 to i32
%v3 = trunc i32 %v2 to i16
@@ -329,6 +430,11 @@ define amdgpu_ps void @struct_buffer_store_vif16(<4 x i32> inreg %rsrc, <2 x i16
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b32 v0, v1, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_vif16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b32 v0, v1, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.struct.buffer.store.v2i16(<2 x i16> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
}
@@ -354,6 +460,11 @@ define amdgpu_ps void @struct_buffer_store_v4i16(<4 x i32> inreg %rsrc, <4 x i16
; GFX11: ; %bb.0:
; GFX11-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], 0 idxen
; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_buffer_store_v4i16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: buffer_store_b64 v[0:1], v2, s[0:3], null idxen
+; GFX12-NEXT: s_endpgm
call void @llvm.amdgcn.struct.buffer.store.v4i16(<4 x i16> %v1, <4 x i32> %rsrc, i32 %index, i32 0, i32 0, i32 0)
ret void
}
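As a reference for the checks above, here is a minimal, hypothetical IR sketch (not part of the diff) of how the struct buffer store intrinsic under test is invoked. The operand order — data, rsrc, vindex, voffset, soffset, cachepolicy — is taken verbatim from the calls in the test bodies; the function name @store_sketch is illustrative only. The GFX12 check lines differ from GFX11 chiefly in printing a zero soffset operand as null.

declare void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float>, <4 x i32>, i32, i32, i32, i32)

; Store %data at struct index %idx with zero voffset/soffset and the
; default cache policy (final i32 0).
define amdgpu_ps void @store_sketch(<4 x i32> inreg %rsrc, <4 x float> %data, i32 %idx) {
  call void @llvm.amdgcn.struct.buffer.store.v4f32(<4 x float> %data, <4 x i32> %rsrc, i32 %idx, i32 0, i32 0, i32 0)
  ret void
}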
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
index 8f33dd6..23db247 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.atomic.buffer.load.ll
@@ -1,30 +1,58 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-SDAG-TRUE16
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-FAKE16
-; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
-; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=CHECK,CHECK-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-FAKE16
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc -global-isel=1 -new-reg-bank-select -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX11,GFX11-GISEL
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-SDAG-TRUE16
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-GISEL-TRUE16
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck %s -check-prefixes=GFX12,GFX12-FAKE16
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB0_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB0_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB0_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB0_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB0_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB0_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -37,23 +65,43 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_const_idx(ptr addrspace(8) %ptr) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: .LBB1_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB1_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: .LBB1_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB1_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_const_idx:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 15
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: .LBB1_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB1_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -66,26 +114,48 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_off(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_off:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB2_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB2_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_off:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB2_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB2_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_off:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB2_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB2_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -98,26 +168,49 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_soff(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_soff:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB3_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB3_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_soff:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB3_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 4 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB3_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_soff:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_mov_b32 s5, 4
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB3_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], s5 idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB3_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -129,26 +222,48 @@ bb2:
ret void
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i32_dlc(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i32_dlc:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB4_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB4_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i32_dlc:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB4_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen offset:4 dlc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB4_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i32_dlc:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB4_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT_RT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB4_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -161,26 +276,49 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_nonatomic_buffer_load_i32(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_nonatomic_buffer_load_i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_mov_b32 s0, 0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
-; CHECK-NEXT: .LBB5_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: s_and_b32 s1, exec_lo, vcc_lo
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; CHECK-NEXT: s_or_b32 s0, s1, s0
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
-; CHECK-NEXT: s_cbranch_execnz .LBB5_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_nonatomic_buffer_load_i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: buffer_load_b32 v1, v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX11-NEXT: .LBB5_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_execnz .LBB5_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_nonatomic_buffer_load_i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: buffer_load_b32 v1, v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s0, 0
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v1, v0
+; GFX12-NEXT: .LBB5_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: s_and_b32 s1, exec_lo, vcc_lo
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX12-NEXT: s_or_b32 s0, s1, s0
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
+; GFX12-NEXT: s_cbranch_execnz .LBB5_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -193,26 +331,49 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_i64(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_i64:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v2, s6
-; CHECK-NEXT: .LBB6_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB6_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_i64:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v2, s6
+; GFX11-NEXT: .LBB6_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[3:4], v2, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[3:4], v[0:1]
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB6_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_i64:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v2, s6
+; GFX12-NEXT: .LBB6_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[4:5], v2, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[0:1]
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB6_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%id.zext = zext i32 %id to i64
@@ -226,26 +387,48 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v2i16(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_v2i16:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB7_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB7_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_v2i16:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB7_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b32 v2, v1, s[0:3], 0 idxen glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB7_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_v2i16:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB7_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b32 v2, v1, s[0:3], null idxen th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB7_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -259,77 +442,172 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i16(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
-; CHECK-SDAG-TRUE16: ; %bb.0: ; %bb
-; CHECK-SDAG-TRUE16-NEXT: s_clause 0x1
-; CHECK-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-SDAG-TRUE16-NEXT: s_endpgm
+; GFX11-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX11-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX11-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-SDAG-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-SDAG-TRUE16-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-FAKE16: ; %bb.0: ; %bb
+; GFX11-FAKE16-NEXT: s_clause 0x1
+; GFX11-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX11-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX11-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-TRUE16-NEXT: s_endpgm
+;
+; GFX11-GISEL-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX11-GISEL: ; %bb.0: ; %bb
+; GFX11-GISEL-NEXT: s_clause 0x1
+; GFX11-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-GISEL-NEXT: s_mov_b32 s4, 0
+; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-GISEL-NEXT: .LBB8_1: ; %bb1
+; GFX11-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s5, v2
+; GFX11-GISEL-NEXT: v_readfirstlane_b32 s6, v3
+; GFX11-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
+; GFX11-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-GISEL-NEXT: s_cbranch_execnz .LBB8_1
+; GFX11-GISEL-NEXT: ; %bb.2: ; %bb2
+; GFX11-GISEL-NEXT: s_endpgm
;
-; CHECK-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
-; CHECK-FAKE16: ; %bb.0: ; %bb
-; CHECK-FAKE16-NEXT: s_clause 0x1
-; CHECK-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-FAKE16-NEXT: s_mov_b32 s4, 0
-; CHECK-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-FAKE16-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-FAKE16-NEXT: .LBB8_1: ; %bb1
-; CHECK-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; CHECK-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; CHECK-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
-; CHECK-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-FAKE16-NEXT: ; %bb.2: ; %bb2
-; CHECK-FAKE16-NEXT: s_endpgm
+; GFX12-SDAG-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-SDAG-TRUE16: ; %bb.0: ; %bb
+; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1
+; GFX12-SDAG-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-SDAG-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-SDAG-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-SDAG-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-SDAG-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-SDAG-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-SDAG-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-SDAG-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-SDAG-TRUE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-SDAG-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-SDAG-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-SDAG-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-SDAG-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-SDAG-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-SDAG-TRUE16-NEXT: s_endpgm
;
-; CHECK-GISEL-LABEL: struct_ptr_atomic_buffer_load_v4i16:
-; CHECK-GISEL: ; %bb.0: ; %bb
-; CHECK-GISEL-NEXT: s_clause 0x1
-; CHECK-GISEL-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-GISEL-NEXT: s_mov_b32 s4, 0
-; CHECK-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-GISEL-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-GISEL-NEXT: .LBB8_1: ; %bb1
-; CHECK-GISEL-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-GISEL-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-GISEL-NEXT: s_waitcnt vmcnt(0)
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s5, v2
-; CHECK-GISEL-NEXT: v_readfirstlane_b32 s6, v3
-; CHECK-GISEL-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; CHECK-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; CHECK-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, s5, v0
-; CHECK-GISEL-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-GISEL-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-GISEL-NEXT: s_cbranch_execnz .LBB8_1
-; CHECK-GISEL-NEXT: ; %bb.2: ; %bb2
-; CHECK-GISEL-NEXT: s_endpgm
+; GFX12-FAKE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-FAKE16: ; %bb.0: ; %bb
+; GFX12-FAKE16-NEXT: s_clause 0x1
+; GFX12-FAKE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-FAKE16-NEXT: s_wait_xcnt 0x0
+; GFX12-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX12-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-FAKE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-FAKE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-FAKE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-FAKE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-FAKE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX12-FAKE16-NEXT: v_lshl_or_b32 v2, v3, 16, v2
+; GFX12-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-FAKE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-FAKE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-FAKE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-FAKE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-FAKE16-NEXT: s_endpgm
+;
+; GFX12-GISEL-TRUE16-LABEL: struct_ptr_atomic_buffer_load_v4i16:
+; GFX12-GISEL-TRUE16: ; %bb.0: ; %bb
+; GFX12-GISEL-TRUE16-NEXT: s_clause 0x1
+; GFX12-GISEL-TRUE16-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-GISEL-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-GISEL-TRUE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_xcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX12-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-GISEL-TRUE16-NEXT: .LBB8_1: ; %bb1
+; GFX12-GISEL-TRUE16-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-GISEL-TRUE16-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-GISEL-TRUE16-NEXT: s_wait_loadcnt 0x0
+; GFX12-GISEL-TRUE16-NEXT: v_mov_b16_e32 v2.h, v3.l
+; GFX12-GISEL-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX12-GISEL-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-GISEL-TRUE16-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-GISEL-TRUE16-NEXT: s_cbranch_execnz .LBB8_1
+; GFX12-GISEL-TRUE16-NEXT: ; %bb.2: ; %bb2
+; GFX12-GISEL-TRUE16-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -344,26 +622,48 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_v4i32(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_v4i32:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB9_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB9_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_v4i32:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB9_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB9_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_v4i32:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB9_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b128 v[2:5], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v5, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB9_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
@@ -377,28 +677,52 @@ bb2:
}
define amdgpu_kernel void @struct_ptr_atomic_buffer_load_ptr(ptr addrspace(8) %ptr, i32 %index) {
-; CHECK-LABEL: struct_ptr_atomic_buffer_load_ptr:
-; CHECK: ; %bb.0: ; %bb
-; CHECK-NEXT: s_clause 0x1
-; CHECK-NEXT: s_load_b32 s6, s[4:5], 0x34
-; CHECK-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: v_mov_b32_e32 v1, s6
-; CHECK-NEXT: .LBB10_1: ; %bb1
-; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
-; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: flat_load_b32 v2, v[2:3]
-; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
-; CHECK-NEXT: s_or_b32 s4, vcc_lo, s4
-; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; CHECK-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
-; CHECK-NEXT: s_cbranch_execnz .LBB10_1
-; CHECK-NEXT: ; %bb.2: ; %bb2
-; CHECK-NEXT: s_endpgm
+; GFX11-LABEL: struct_ptr_atomic_buffer_load_ptr:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: .LBB10_1: ; %bb1
+; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX11-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], 0 idxen offset:4 glc
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: flat_load_b32 v2, v[2:3]
+; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX11-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB10_1
+; GFX11-NEXT: ; %bb.2: ; %bb2
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: struct_ptr_atomic_buffer_load_ptr:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b32 s6, s[4:5], 0x34
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX12-NEXT: s_wait_xcnt 0x0
+; GFX12-NEXT: s_mov_b32 s4, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_mov_b32_e32 v1, s6
+; GFX12-NEXT: .LBB10_1: ; %bb1
+; GFX12-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12-NEXT: buffer_load_b64 v[2:3], v1, s[0:3], null idxen offset:4 th:TH_LOAD_NT
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: flat_load_b32 v2, v[2:3]
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: v_cmp_ne_u32_e32 vcc_lo, v2, v0
+; GFX12-NEXT: s_or_b32 s4, vcc_lo, s4
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX12-NEXT: s_and_not1_b32 exec_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_execnz .LBB10_1
+; GFX12-NEXT: ; %bb.2: ; %bb2
+; GFX12-NEXT: s_endpgm
bb:
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
br label %bb1
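The kernels above spin on an atomic buffer load until the loaded value matches the lane id. A minimal, hypothetical sketch of that pattern follows; it assumes the i32 overload takes (rsrc, vindex, voffset, soffset, cachepolicy), with cachepolicy 1 (glc) being what the GFX12 checks render as th:TH_LOAD_NT, and @spin_sketch is an illustrative name rather than a test from this diff.

declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8), i32, i32, i32, i32)

define amdgpu_kernel void @spin_sketch(ptr addrspace(8) %rsrc, i32 %idx) {
bb:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb1
bb1:                                              ; re-load each iteration until the value arrives
  %v = call i32 @llvm.amdgcn.struct.ptr.atomic.buffer.load.i32(ptr addrspace(8) %rsrc, i32 %idx, i32 0, i32 0, i32 1)
  %done = icmp eq i32 %v, %id
  br i1 %done, label %bb2, label %bb1
bb2:
  ret void
}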
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
index 746b879..4366472 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_nortn.ll
@@ -3,6 +3,7 @@
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) #0 {
; GFX908-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -39,6 +40,14 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -75,6 +84,13 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voff
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret void
}
@@ -114,6 +130,14 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret void
}
@@ -153,6 +177,14 @@ define void @struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr
; GFX1200-NEXT: s_wait_kmcnt 0x0
; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v[1:2], s[0:3], s16 idxen offen
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -291,6 +323,42 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__vgpr_rsrc__vgpr_v
; GFX1200-NEXT: ; %bb.2:
; GFX1200-NEXT: s_mov_b32 exec_lo, s2
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[8:9], s[4:7], s3 idxen offen
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr0
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -429,6 +497,42 @@ define void @struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__vgpr_rsrc__vgpr
; GFX1200-NEXT: ; %bb.2:
; GFX1200-NEXT: s_mov_b32 exec_lo, s2
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_noret__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[8:9], s[4:7], s3 idxen offen
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr0
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB5_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
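For the fadd atomics above, the call shape is visible verbatim in the test bodies: value, rsrc (ptr addrspace(8)), vindex, voffset, soffset, cachepolicy, where cachepolicy 2 (slc) is what GFX1200/GFX1250 print as th:TH_ATOMIC_NT. Below is a minimal, hypothetical sketch of the no-return and returning forms; the names @fadd_noret_sketch and @fadd_rtn_sketch are illustrative only.

declare float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float, ptr addrspace(8), i32, i32, i32, i32)

; Result discarded: selects the no-return buffer_atomic_add_f32 form,
; here with cachepolicy 2 (slc, printed as th:TH_ATOMIC_NT on GFX12).
define void @fadd_noret_sketch(ptr addrspace(8) inreg %rsrc, float %v, i32 %idx) {
  %unused = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %v, ptr addrspace(8) %rsrc, i32 %idx, i32 0, i32 0, i32 2)
  ret void
}

; Result used: the same intrinsic, emitted with th:TH_ATOMIC_RETURN on GFX12.
define float @fadd_rtn_sketch(ptr addrspace(8) inreg %rsrc, float %v, i32 %idx) {
  %r = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %v, ptr addrspace(8) %rsrc, i32 %idx, i32 0, i32 0, i32 0)
  ret float %r
}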
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
index 71c63bf..0191a85 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fadd_rtn.ll
@@ -2,6 +2,7 @@
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx90a < %s | FileCheck -check-prefix=GFX90A %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx942 < %s | FileCheck -check-prefix=GFX942 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) #0 {
; GFX90A-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -32,6 +33,15 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %ret
}
@@ -62,6 +72,14 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffs
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret float %ret
}
@@ -95,6 +113,15 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX1200-NEXT: buffer_atomic_add_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret float %ret
}
@@ -128,6 +155,15 @@ define <2 x half> @struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__
; GFX1200-NEXT: buffer_atomic_pk_add_f16 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <2 x half> %ret
}
@@ -237,6 +273,43 @@ define float @struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__vgpr_rsrc__vgpr_vo
; GFX1200-NEXT: s_mov_b32 exec_lo, s2
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB4_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_add_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB4_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %ret
}
@@ -346,6 +419,43 @@ define <2 x half> @struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__vgpr_rsrc__
; GFX1200-NEXT: s_mov_b32 exec_lo, s2
; GFX1200-NEXT: s_wait_loadcnt 0x0
; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_v2f16_rtn__vgpr_val__vgpr_rsrc__vgpr_voffset__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v9, v6 :: v_dual_mov_b32 v8, v5
+; GFX1250-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB5_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_pk_add_f16 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB5_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret <2 x half> %ret
}
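
For the packed variants above, the same intrinsic family is instantiated at <2 x half> and lowers to buffer_atomic_pk_add_f16, which applies both half lanes in a single atomic. A minimal sketch with a hypothetical function name:

declare <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half>, ptr addrspace(8), i32, i32, i32, i32)

; Packed f16 atomic add; both halves of %val are added by one instruction.
define <2 x half> @pk_fadd_sketch(<2 x half> %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  %old = call <2 x half> @llvm.amdgcn.struct.ptr.buffer.atomic.fadd.v2f16(<2 x half> %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
  ret <2 x half> %old
}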
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
index e3889ab..d551d91 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32.ll
@@ -4,7 +4,8 @@
; Not supported in gfx8 or gfx9
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; GFX6-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -35,16 +36,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %ret
}
@@ -78,16 +88,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[4:5], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
@@ -122,16 +141,24 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffs
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret float %ret
}
@@ -165,16 +192,25 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret float %ret
}
@@ -206,15 +242,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -246,15 +290,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[4:5], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -288,15 +340,22 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voff
; GFX11-NEXT: buffer_atomic_max_f32 v0, v1, s[0:3], s16 idxen
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret void
}
@@ -328,15 +387,23 @@ define void @struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: buffer_atomic_max_f32 v0, v[1:2], s[0:3], s16 idxen offen slc
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret void
}
@@ -442,36 +509,68 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
-; GFX12-NEXT: s_and_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s1, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
+; GFX1200-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1200-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_add__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[4:5]
+; GFX1250-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[8:9], s[4:7], s0 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
@@ -595,41 +694,78 @@ define float @struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_vo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: v_readfirstlane_b32 s3, v7
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
-; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s0, s0, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr7
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
+; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_b32 s0, s0, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_max_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr7
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1200-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_add_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_add__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_max_num_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
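
A difference worth noting in the hunks above: for the voffset+256 cases, gfx1200 folds the constant into the instruction's offset:256 field, while the new gfx1250 output instead materializes it with v_dual_add_nc_u32 and omits the immediate field; the checks record the change without giving a reason. The IR shape that exercises this, taken from the tests (function name hypothetical):

declare float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float, ptr addrspace(8), i32, i32, i32, i32)

define float @voffset_imm_sketch(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; The +256 may be folded into the MUBUF offset field or kept as a VGPR add,
  ; depending on target.
  %voffset.add = add i32 %voffset, 256
  %ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmax.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
  ret float %ret
}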
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
index f001bf9..0096289 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32.ll
@@ -4,7 +4,8 @@
; Not supported in gfx8 or gfx9
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 < %s | FileCheck -check-prefix=GFX10 %s
; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
-; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 < %s | FileCheck -check-prefix=GFX1200 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1250 < %s | FileCheck -check-prefix=GFX1250 %s
define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
; GFX6-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
@@ -35,16 +36,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %ret
}
@@ -78,16 +88,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[4:5], s[0:3], s16 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
@@ -122,16 +141,24 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voff
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret float %ret
}
@@ -165,16 +192,25 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT_RETURN
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret float %ret
}
@@ -206,15 +242,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret void
}
@@ -246,15 +290,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen offset:256
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v4, v1 :: v_dual_add_nc_u32 v5, 0x100, v2
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[4:5], s[0:3], s16 idxen offen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret void
@@ -288,15 +340,22 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_vof
; GFX11-NEXT: buffer_atomic_min_f32 v0, v1, s[0:3], s16 idxen
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__0_voffset__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v1, s[0:3], s16 idxen
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 0, i32 %soffset, i32 0)
ret void
}
@@ -328,15 +387,23 @@ define void @struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_
; GFX11-NEXT: buffer_atomic_min_f32 v0, v[1:2], s[0:3], s16 idxen offen slc
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[1:2], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_noret__vgpr_val__sgpr_rsrc__vgpr_voffset__sgpr_soffset_slc:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v3, v2 :: v_dual_mov_b32 v2, v1
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[2:3], s[0:3], s16 idxen offen th:TH_ATOMIC_NT
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
ret void
}
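
The _slc tests above pass 2 as the final cachepolicy operand; that bit is what surfaces as slc on gfx10/gfx11 and as th:TH_ATOMIC_NT (or th:TH_ATOMIC_NT_RETURN) on the gfx12 targets. A sketch using the same operand encoding as these tests (function name hypothetical):

declare float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float, ptr addrspace(8), i32, i32, i32, i32)

define void @fmin_slc_sketch(float %val, ptr addrspace(8) inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
  ; cachepolicy = 2 requests non-temporal (slc) behaviour.
  %unused = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 2)
  ret void
}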
@@ -442,36 +509,68 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
-; GFX12-NEXT: s_and_b32 s1, vcc_lo, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s1, s1
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s1
-; GFX12-NEXT: s_cbranch_execnz .LBB8_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[3:4]
+; GFX1200-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s0 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1200-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__vgpr_rsrc__vgpr_voffset_fmin__sgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB8_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s1, s[6:7], v[4:5]
+; GFX1250-NEXT: s_and_b32 s1, vcc_lo, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s1, s1
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[8:9], s[4:7], s0 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_execnz .LBB8_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
@@ -595,41 +694,78 @@ define float @struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_v
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GFX12-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset:
-; GFX12: ; %bb.0:
-; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-NEXT: s_wait_expcnt 0x0
-; GFX12-NEXT: s_wait_samplecnt 0x0
-; GFX12-NEXT: s_wait_bvhcnt 0x0
-; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_mov_b32 s2, exec_lo
-; GFX12-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
-; GFX12-NEXT: v_readfirstlane_b32 s4, v1
-; GFX12-NEXT: v_readfirstlane_b32 s5, v2
-; GFX12-NEXT: v_readfirstlane_b32 s6, v3
-; GFX12-NEXT: v_readfirstlane_b32 s7, v4
-; GFX12-NEXT: v_readfirstlane_b32 s3, v7
-; GFX12-NEXT: s_wait_alu 0xf1ff
-; GFX12-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
-; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX12-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
-; GFX12-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
-; GFX12-NEXT: s_and_b32 s0, vcc_lo, s0
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_b32 s0, s0, s1
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_saveexec_b32 s0, s0
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
-; GFX12-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
-; GFX12-NEXT: ; implicit-def: $vgpr7
-; GFX12-NEXT: ; implicit-def: $vgpr5_vgpr6
-; GFX12-NEXT: s_xor_b32 exec_lo, exec_lo, s0
-; GFX12-NEXT: s_cbranch_execnz .LBB9_1
-; GFX12-NEXT: ; %bb.2:
-; GFX12-NEXT: s_mov_b32 exec_lo, s2
-; GFX12-NEXT: s_wait_loadcnt 0x0
-; GFX12-NEXT: s_setpc_b64 s[30:31]
+; GFX1200-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset:
+; GFX1200: ; %bb.0:
+; GFX1200-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1200-NEXT: s_wait_expcnt 0x0
+; GFX1200-NEXT: s_wait_samplecnt 0x0
+; GFX1200-NEXT: s_wait_bvhcnt 0x0
+; GFX1200-NEXT: s_wait_kmcnt 0x0
+; GFX1200-NEXT: s_mov_b32 s2, exec_lo
+; GFX1200-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1200-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1200-NEXT: v_readfirstlane_b32 s5, v2
+; GFX1200-NEXT: v_readfirstlane_b32 s6, v3
+; GFX1200-NEXT: v_readfirstlane_b32 s7, v4
+; GFX1200-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1200-NEXT: s_wait_alu 0xf1ff
+; GFX1200-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[1:2]
+; GFX1200-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1200-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[3:4]
+; GFX1200-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1200-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_b32 s0, s0, s1
+; GFX1200-NEXT: s_wait_alu 0xfffe
+; GFX1200-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: buffer_atomic_min_num_f32 v0, v[5:6], s[4:7], s3 idxen offen offset:256 th:TH_ATOMIC_RETURN
+; GFX1200-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4
+; GFX1200-NEXT: ; implicit-def: $vgpr7
+; GFX1200-NEXT: ; implicit-def: $vgpr5_vgpr6
+; GFX1200-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1200-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1200-NEXT: ; %bb.2:
+; GFX1200-NEXT: s_mov_b32 exec_lo, s2
+; GFX1200-NEXT: s_wait_loadcnt 0x0
+; GFX1200-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: struct_ptr_buffer_atomic_fmin_f32_ret__vgpr_val__sgpr_rsrc__vgpr_voffset_fmin__vgpr_soffset:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_dual_mov_b32 v8, v5 :: v_dual_mov_b32 v5, v4
+; GFX1250-NEXT: v_dual_mov_b32 v4, v3 :: v_dual_mov_b32 v3, v2
+; GFX1250-NEXT: v_dual_mov_b32 v2, v1 :: v_dual_add_nc_u32 v9, 0x100, v6
+; GFX1250-NEXT: s_mov_b32 s2, exec_lo
+; GFX1250-NEXT: .LBB9_1: ; =>This Inner Loop Header: Depth=1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_readfirstlane_b32 s4, v2
+; GFX1250-NEXT: v_readfirstlane_b32 s5, v3
+; GFX1250-NEXT: v_readfirstlane_b32 s6, v4
+; GFX1250-NEXT: v_readfirstlane_b32 s7, v5
+; GFX1250-NEXT: v_readfirstlane_b32 s3, v7
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[4:5], v[2:3]
+; GFX1250-NEXT: v_cmp_eq_u64_e64 s0, s[6:7], v[4:5]
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: v_cmp_eq_u32_e64 s1, s3, v7
+; GFX1250-NEXT: s_and_b32 s0, vcc_lo, s0
+; GFX1250-NEXT: s_and_b32 s0, s0, s1
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT: s_and_saveexec_b32 s0, s0
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: buffer_atomic_min_num_f32 v0, v[8:9], s[4:7], s3 idxen offen th:TH_ATOMIC_RETURN
+; GFX1250-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
+; GFX1250-NEXT: ; implicit-def: $vgpr7
+; GFX1250-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GFX1250-NEXT: s_wait_xcnt 0x0
+; GFX1250-NEXT: s_xor_b32 exec_lo, exec_lo, s0
+; GFX1250-NEXT: s_cbranch_execnz .LBB9_1
+; GFX1250-NEXT: ; %bb.2:
+; GFX1250-NEXT: s_mov_b32 exec_lo, s2
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%voffset.add = add i32 %voffset, 256
%ret = call float @llvm.amdgcn.struct.ptr.buffer.atomic.fmin.f32(float %val, ptr addrspace(8) %rsrc, i32 %vindex, i32 %voffset.add, i32 %soffset, i32 0)
ret float %ret
diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
index a3ebaec..5f0ca7b 100644
--- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
@@ -74,7 +74,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp(ptr addrspace(1) %out) {
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_cbranch_scc1 .LBB0_1
; FLATSCR-NEXT: ; %bb.2: ; %split
-; FLATSCR-NEXT: s_movk_i32 s0, 0x5000
+; FLATSCR-NEXT: s_movk_i32 s0, 0x2000
+; FLATSCR-NEXT: s_addk_i32 s0, 0x3000
; FLATSCR-NEXT: scratch_load_dwordx2 v[0:1], off, s0 offset:208 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_movk_i32 s0, 0x3000
@@ -175,7 +176,9 @@ define void @func_local_stack_offset_uses_sp(ptr addrspace(1) %out) {
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_cbranch_scc1 .LBB1_1
; FLATSCR-NEXT: ; %bb.2: ; %split
-; FLATSCR-NEXT: s_add_i32 s0, s33, 0x5000
+; FLATSCR-NEXT: s_movk_i32 s0, 0x2000
+; FLATSCR-NEXT: s_add_i32 s1, s33, s0
+; FLATSCR-NEXT: s_add_i32 s0, s1, 0x3000
; FLATSCR-NEXT: scratch_load_dwordx2 v[2:3], off, s0 offset:208 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_add_i32 s0, s33, 0x3000
@@ -223,30 +226,35 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: s_cbranch_scc1 .LBB2_1
; MUBUF-NEXT: ; %bb.2: ; %split
+; MUBUF-NEXT: s_movk_i32 s5, 0x12d4
; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000
-; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d4, v1
+; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1
+; MUBUF-NEXT: s_movk_i32 s5, 0x12d0
; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000
; MUBUF-NEXT: s_movk_i32 s4, 0x4000
; MUBUF-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
-; MUBUF-NEXT: v_or_b32_e32 v0, 0x12d0, v1
+; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1
+; MUBUF-NEXT: s_movk_i32 s5, 0x12c4
; MUBUF-NEXT: v_mov_b32_e32 v1, 0x4000
; MUBUF-NEXT: s_or_b32 s4, s4, 0x12c0
; MUBUF-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
-; MUBUF-NEXT: v_or_b32_e32 v0, 0x12c4, v1
-; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000
+; MUBUF-NEXT: v_or_b32_e32 v0, s5, v1
; MUBUF-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: v_mov_b32_e32 v0, s4
-; MUBUF-NEXT: v_or_b32_e32 v2, 0x12cc, v3
+; MUBUF-NEXT: s_movk_i32 s4, 0x12cc
+; MUBUF-NEXT: v_mov_b32_e32 v3, 0x4000
+; MUBUF-NEXT: v_or_b32_e32 v2, s4, v3
+; MUBUF-NEXT: s_movk_i32 s4, 0x12c8
; MUBUF-NEXT: v_mov_b32_e32 v6, 0x4000
; MUBUF-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
; MUBUF-NEXT: v_mov_b32_e32 v7, 0x4000
; MUBUF-NEXT: buffer_load_dword v3, v2, s[0:3], 0 offen glc
; MUBUF-NEXT: s_waitcnt vmcnt(0)
-; MUBUF-NEXT: v_or_b32_e32 v2, 0x12c8, v6
+; MUBUF-NEXT: v_or_b32_e32 v2, s4, v6
; MUBUF-NEXT: v_mov_b32_e32 v8, 0x4000
; MUBUF-NEXT: v_mov_b32_e32 v9, 0x4000
; MUBUF-NEXT: buffer_load_dword v2, v2, s[0:3], 0 offen glc
@@ -298,7 +306,8 @@ define amdgpu_kernel void @local_stack_offset_uses_sp_flat(ptr addrspace(1) %out
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: s_cbranch_scc1 .LBB2_1
; FLATSCR-NEXT: ; %bb.2: ; %split
-; FLATSCR-NEXT: s_movk_i32 s0, 0x3000
+; FLATSCR-NEXT: s_movk_i32 s0, 0x1000
+; FLATSCR-NEXT: s_addk_i32 s0, 0x2000
; FLATSCR-NEXT: scratch_load_dwordx2 v[8:9], off, s0 offset:720 glc
; FLATSCR-NEXT: s_waitcnt vmcnt(0)
; FLATSCR-NEXT: scratch_load_dwordx4 v[0:3], off, s0 offset:704 glc
diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
index 1b2eb83..4393172 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix-lo-bf16.ll
@@ -74,10 +74,11 @@ define bfloat @v_mad_mixlo_bf16_bf16lo_bf16lo_f32_clamp_post_cvt(bfloat %src0, b
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0]
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
+; GFX1250-NEXT: v_fma_mixlo_bf16 v0, v0, v1, v2 op_sel_hi:[1,1,0] clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+
+
+
%src0.ext = fpext bfloat %src0 to float
%src1.ext = fpext bfloat %src1 to float
%result = tail call float @llvm.fmuladd.f32(float %src0.ext, float %src1.ext, float %src2)
@@ -191,10 +192,11 @@ define <2 x bfloat> @v_mad_mix_v2f32_clamp_postcvt(<2 x bfloat> %src0, <2 x bflo
; GFX1250-NEXT: v_lshlrev_b32_e32 v0, 16, v2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[4:5], v[6:7], v[0:1]
-; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+
+
+
%src0.ext = fpext <2 x bfloat> %src0 to <2 x float>
%src1.ext = fpext <2 x bfloat> %src1 to <2 x float>
%src2.ext = fpext <2 x bfloat> %src2 to <2 x float>
@@ -247,12 +249,12 @@ define <4 x bfloat> @v_mad_mix_v4f32_clamp_postcvt(<4 x bfloat> %src0, <4 x bflo
; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[6:7], v[0:1], v[2:3]
; GFX1250-NEXT: v_pk_fma_f32 v[2:3], v[8:9], v[10:11], v[12:13]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1
-; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_pk_max_num_bf16 v0, v0, v0 clamp
-; GFX1250-NEXT: v_pk_max_num_bf16 v1, v1, v1 clamp
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v0, v0, v1 clamp
+; GFX1250-NEXT: v_cvt_pk_bf16_f32 v1, v2, v3 clamp
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+
+
+
%src0.ext = fpext <4 x bfloat> %src0 to <4 x float>
%src1.ext = fpext <4 x bfloat> %src1 to <4 x float>
%src2.ext = fpext <4 x bfloat> %src2 to <4 x float>
diff --git a/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
new file mode 100644
index 0000000..6d0aa1e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/no-folding-imm-to-inst-with-fi.ll
@@ -0,0 +1,108 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck %s
+
+define protected amdgpu_kernel void @no_folding_imm_to_inst_with_fi(<4 x i64> %val4, <16 x i64> %val16) {
+; CHECK-LABEL: no_folding_imm_to_inst_with_fi:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_clause 0x2
+; CHECK-NEXT: s_load_b256 s[36:43], s[4:5], 0x24
+; CHECK-NEXT: s_load_b512 s[16:31], s[4:5], 0xe4
+; CHECK-NEXT: s_load_b512 s[0:15], s[4:5], 0xa4
+; CHECK-NEXT: s_mov_b64 s[34:35], src_private_base
+; CHECK-NEXT: s_movk_i32 s33, 0x70
+; CHECK-NEXT: s_movk_i32 s34, 0x60
+; CHECK-NEXT: s_or_b32 s44, 0x80, s33
+; CHECK-NEXT: s_mov_b32 s45, s35
+; CHECK-NEXT: s_or_b32 s46, 0x80, s34
+; CHECK-NEXT: s_mov_b32 s47, s35
+; CHECK-NEXT: v_dual_mov_b32 v20, s44 :: v_dual_mov_b32 v21, s45
+; CHECK-NEXT: v_dual_mov_b32 v22, s46 :: v_dual_mov_b32 v23, s47
+; CHECK-NEXT: s_movk_i32 s34, 0x80
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; CHECK-NEXT: v_dual_mov_b32 v34, s34 :: v_dual_mov_b32 v35, s35
+; CHECK-NEXT: s_wait_kmcnt 0x0
+; CHECK-NEXT: v_dual_mov_b32 v0, s40 :: v_dual_mov_b32 v1, s41
+; CHECK-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
+; CHECK-NEXT: v_dual_mov_b32 v4, s36 :: v_dual_mov_b32 v5, s37
+; CHECK-NEXT: v_dual_mov_b32 v6, s38 :: v_dual_mov_b32 v7, s39
+; CHECK-NEXT: scratch_store_b128 off, v[0:3], off offset:16 scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: v_dual_mov_b32 v0, s20 :: v_dual_mov_b32 v1, s21
+; CHECK-NEXT: s_movk_i32 s20, 0x50
+; CHECK-NEXT: v_dual_mov_b32 v8, s28 :: v_dual_mov_b32 v9, s29
+; CHECK-NEXT: v_dual_mov_b32 v10, s30 :: v_dual_mov_b32 v11, s31
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: s_or_b32 s20, 0x80, s20
+; CHECK-NEXT: s_mov_b32 s21, s35
+; CHECK-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
+; CHECK-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
+; CHECK-NEXT: v_dual_mov_b32 v2, s22 :: v_dual_mov_b32 v3, s23
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: v_dual_mov_b32 v25, s21 :: v_dual_mov_b32 v24, s20
+; CHECK-NEXT: scratch_store_b128 off, v[4:7], off scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[20:21], v[8:11] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[22:23], v[12:15] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[24:25], v[0:3] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: v_dual_mov_b32 v0, s16 :: v_dual_mov_b32 v1, s17
+; CHECK-NEXT: s_or_b32 s16, 0x80, 64
+; CHECK-NEXT: s_mov_b32 s17, s35
+; CHECK-NEXT: v_dual_mov_b32 v4, s12 :: v_dual_mov_b32 v5, s13
+; CHECK-NEXT: s_or_b32 s12, 0x80, 48
+; CHECK-NEXT: s_mov_b32 s13, s35
+; CHECK-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
+; CHECK-NEXT: s_or_b32 s8, 0x80, 32
+; CHECK-NEXT: s_mov_b32 s9, s35
+; CHECK-NEXT: v_dual_mov_b32 v12, s4 :: v_dual_mov_b32 v13, s5
+; CHECK-NEXT: s_or_b32 s4, 0x80, 16
+; CHECK-NEXT: s_mov_b32 s5, s35
+; CHECK-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v3, s19
+; CHECK-NEXT: s_wait_alu 0xfffe
+; CHECK-NEXT: v_dual_mov_b32 v27, s17 :: v_dual_mov_b32 v26, s16
+; CHECK-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s15
+; CHECK-NEXT: v_dual_mov_b32 v29, s13 :: v_dual_mov_b32 v28, s12
+; CHECK-NEXT: v_dual_mov_b32 v31, s9 :: v_dual_mov_b32 v30, s8
+; CHECK-NEXT: v_dual_mov_b32 v33, s5 :: v_dual_mov_b32 v32, s4
+; CHECK-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
+; CHECK-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v15, s7
+; CHECK-NEXT: v_dual_mov_b32 v16, s0 :: v_dual_mov_b32 v17, s1
+; CHECK-NEXT: v_dual_mov_b32 v18, s2 :: v_dual_mov_b32 v19, s3
+; CHECK-NEXT: flat_store_b128 v[26:27], v[0:3] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[28:29], v[4:7] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[30:31], v[8:11] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[32:33], v[12:15] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_store_b128 v[34:35], v[16:19] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_storecnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[22:23] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[20:21] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[26:27] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[24:25] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[30:31] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[28:29] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[34:35] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt_dscnt 0x0
+; CHECK-NEXT: flat_load_b128 v[0:3], v[32:33] scope:SCOPE_SYS
+; CHECK-NEXT: s_wait_loadcnt 0x0
+; CHECK-NEXT: s_endpgm
+bb:
+ %alloca = alloca <4 x i64>, align 32, addrspace(5)
+ %alloca1 = alloca <16 x i64>, align 128, addrspace(5)
+ store volatile <4 x i64> %val4, ptr addrspace(5) %alloca
+ %ascast = addrspacecast ptr addrspace(5) %alloca1 to ptr
+ store volatile <16 x i64> %val16, ptr %ascast
+ %load = load volatile <16 x i64>, ptr %ascast
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
index 42401af..8304be9 100644
--- a/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
+++ b/llvm/test/CodeGen/AMDGPU/packed-fp32.ll
@@ -78,12 +78,14 @@ define amdgpu_kernel void @fadd_v2_vs(ptr addrspace(1) %a, <2 x float> %x) {
; GFX1250-LABEL: fadd_v2_vs:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -142,13 +144,16 @@ define amdgpu_kernel void @fadd_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[2:3]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[4:5]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[6:7]
+; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v4_vs:
@@ -156,13 +161,16 @@ define amdgpu_kernel void @fadd_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[6:7]
+; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id
@@ -332,56 +340,69 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fadd_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v40, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1]
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v40, s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v40, s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v40, s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v40, s[34:35]
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v40, s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v40, s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v40, s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v40, s[34:35] offset:112
+; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[12:13]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[14:15]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s20 :: v_dual_mov_b32 v35, s21
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s22 :: v_dual_mov_b32 v39, s23
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v37, s29
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s30 :: v_dual_mov_b32 v43, s31
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v44, s24 :: v_dual_mov_b32 v33, s19
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s28 :: v_dual_mov_b32 v57, s15
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v54, s12
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s13 :: v_dual_mov_b32 v56, s14
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s7 :: v_dual_mov_b32 v52, s2
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s27 :: v_dual_mov_b32 v48, s4
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s5 :: v_dual_mov_b32 v50, s6
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s25 :: v_dual_mov_b32 v46, s26
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[34:35]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s9
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s10 :: v_dual_mov_b32 v39, s11
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], s[10:11]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], s[16:17]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], s[40:41]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[26:27], v[26:27], v[42:43]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[24:25], v[24:25], v[36:37]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[16:17]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[18:19], v[18:19], s[38:39]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[24:25], v[24:25], s[48:49]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[34:35]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[18:19], v[18:19], v[38:39]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[28:29], v[28:29], s[44:45]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[30:31], v[30:31], s[46:47]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[26:27], v[26:27], s[50:51]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[16:17], v[16:17], s[36:37]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], s[42:43]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], s[18:19]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[8:9], v[8:9], s[20:21]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[10:11], v[10:11], s[22:23]
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], s[8:9]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[54:55]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[56:57]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[52:53]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[42:43]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[48:49]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[50:51]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[44:45]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[46:47]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[32:33]
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[36:37]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[16:19], s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[12:15], s[34:35] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[8:11], s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[4:7], s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[20:23], s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[24:27], s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[0:3], s[34:35]
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[28:31], s[34:35] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v32_vs:
@@ -389,54 +410,70 @@ define amdgpu_kernel void @fadd_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[16:17]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[2:3], v[2:3], v[34:35]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], s[20:21]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], s[22:23]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[4:5], v[4:5], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[6:7], v[6:7], v[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], s[24:25]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], s[26:27]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[40:41]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[42:43]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[12:13], v[12:13], s[28:29]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[14:15], v[14:15], s[30:31]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[44:45]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[14:15], v[14:15], v[46:47]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[16:17], v[16:17], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[18:19], v[18:19], s[2:3]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[16:17], v[16:17], v[48:49]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[18:19], v[18:19], v[50:51]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[20:21], v[20:21], s[4:5]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[22:23], v[22:23], s[6:7]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[20:21], v[20:21], v[52:53]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[22:23], v[22:23], v[54:55]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[24:25], v[24:25], s[8:9]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[26:27], v[26:27], s[10:11]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[24:25], v[24:25], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[26:27], v[26:27], v[34:35]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], s[12:13]
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], s[14:15]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[28:29], v[28:29], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[30:31], v[30:31], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -502,15 +539,16 @@ define amdgpu_kernel void @fadd_v2_v_imm(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fadd_v2_v_imm:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -645,15 +683,16 @@ define amdgpu_kernel void @fadd_v2_v_lit_splat(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fadd_v2_v_lit_splat:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 1.0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -703,13 +742,15 @@ define amdgpu_kernel void @fadd_v2_v_lit_hi0(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fadd_v2_v_lit_hi0:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], 0x3f800000
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -746,17 +787,31 @@ define amdgpu_kernel void @fadd_v2_v_lit_lo0(ptr addrspace(1) %a) {
; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; PACKED-NEXT: s_endpgm
;
-; GFX1250-LABEL: fadd_v2_v_lit_lo0:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
-; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000)
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
-; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
-; GFX1250-NEXT: s_endpgm
+; GFX1250-SDAG-LABEL: fadd_v2_v_lit_lo0:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x3f80000000000000)
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: fadd_v2_v_lit_lo0:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x3f80000000000000)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
%load = load <2 x float>, ptr addrspace(1) %gep, align 8
@@ -792,17 +847,31 @@ define amdgpu_kernel void @fadd_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; PACKED-NEXT: s_endpgm
;
-; GFX1250-LABEL: fadd_v2_v_unfoldable_lit:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
-; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000)
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
-; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
-; GFX1250-NEXT: s_endpgm
+; GFX1250-SDAG-LABEL: fadd_v2_v_unfoldable_lit:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x400000003f800000)
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: fadd_v2_v_unfoldable_lit:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
%load = load <2 x float>, ptr addrspace(1) %gep, align 8
@@ -1085,12 +1154,14 @@ define amdgpu_kernel void @fadd_v2_v_fneg_lo2(ptr addrspace(1) %a, float %x, flo
; GFX1250-SDAG-LABEL: fadd_v2_v_fneg_lo2:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] neg_lo:[0,1]
-; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] neg_lo:[0,1]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v2_v_fneg_lo2:
@@ -1159,12 +1230,14 @@ define amdgpu_kernel void @fadd_v2_v_fneg_hi2(ptr addrspace(1) %a, float %x, flo
; GFX1250-SDAG-LABEL: fadd_v2_v_fneg_hi2:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], s[2:3] op_sel:[0,1] op_sel_hi:[1,0] neg_hi:[0,1]
-; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3] op_sel:[0,1] op_sel_hi:[1,0] neg_hi:[0,1]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_v2_v_fneg_hi2:
@@ -1262,12 +1335,14 @@ define amdgpu_kernel void @fmul_v2_vs(ptr addrspace(1) %a, <2 x float> %x) {
; GFX1250-LABEL: fmul_v2_vs:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -1326,13 +1401,16 @@ define amdgpu_kernel void @fmul_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s2 :: v_dual_mov_b32 v5, s3
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[2:3]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[4:5]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[6:7]
+; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fmul_v4_vs:
@@ -1340,13 +1418,16 @@ define amdgpu_kernel void @fmul_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[4:5]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[6:7]
+; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id
@@ -1516,56 +1597,69 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fmul_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v40, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1]
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v40, s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v40, s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v40, s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v40, s[34:35]
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v40, s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v40, s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v40, s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v40, s[34:35] offset:112
+; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[12:13]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[14:15]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s20 :: v_dual_mov_b32 v35, s21
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s22 :: v_dual_mov_b32 v39, s23
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v37, s29
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v42, s30 :: v_dual_mov_b32 v43, s31
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v44, s24 :: v_dual_mov_b32 v33, s19
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v36, s28 :: v_dual_mov_b32 v57, s15
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v54, s12
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v55, s13 :: v_dual_mov_b32 v56, s14
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v51, s7 :: v_dual_mov_b32 v52, s2
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v47, s27 :: v_dual_mov_b32 v48, s4
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v49, s5 :: v_dual_mov_b32 v50, s6
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v45, s25 :: v_dual_mov_b32 v46, s26
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[34:35]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39]
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s9
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v38, s10 :: v_dual_mov_b32 v39, s11
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], s[10:11]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], s[16:17]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], s[40:41]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[42:43]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[24:25], v[24:25], v[36:37]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[16:17]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[18:19], v[18:19], s[38:39]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[24:25], v[24:25], s[48:49]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[34:35]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[38:39]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[28:29], v[28:29], s[44:45]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[30:31], v[30:31], s[46:47]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[26:27], v[26:27], s[50:51]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[16:17], v[16:17], s[36:37]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], s[42:43]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], s[18:19]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[8:9], v[8:9], s[20:21]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[10:11], v[10:11], s[22:23]
-; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], s[8:9]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[54:55]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[56:57]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[52:53]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[42:43]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[48:49]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[50:51]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[44:45]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[46:47]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[32:33]
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[36:37]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[16:19], s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[12:15], s[34:35] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[8:11], s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[4:7], s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[20:23], s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[24:27], s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[0:3], s[34:35]
+; GFX1250-SDAG-NEXT: global_store_b128 v40, v[28:31], s[34:35] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fmul_v32_vs:
@@ -1573,54 +1667,70 @@ define amdgpu_kernel void @fmul_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[16:17]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[2:3], v[2:3], v[34:35]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], s[20:21]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], s[22:23]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[4:5], v[4:5], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[6:7], v[6:7], v[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], s[24:25]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], s[26:27]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[8:9], v[8:9], v[40:41]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[10:11], v[10:11], v[42:43]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[12:13], v[12:13], s[28:29]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[14:15], v[14:15], s[30:31]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[12:13], v[12:13], v[44:45]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[14:15], v[14:15], v[46:47]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[16:17], v[16:17], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[18:19], v[18:19], s[2:3]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[16:17], v[16:17], v[48:49]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[18:19], v[18:19], v[50:51]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[20:21], v[20:21], s[4:5]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[22:23], v[22:23], s[6:7]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[20:21], v[20:21], v[52:53]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[22:23], v[22:23], v[54:55]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[24:25], v[24:25], s[8:9]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[26:27], v[26:27], s[10:11]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[24:25], v[24:25], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[26:27], v[26:27], v[34:35]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], s[12:13]
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], s[14:15]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[28:29], v[28:29], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[30:31], v[30:31], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -1685,15 +1795,16 @@ define amdgpu_kernel void @fmul_v2_v_imm(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fmul_v2_v_imm:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -1828,15 +1939,16 @@ define amdgpu_kernel void @fmul_v2_v_lit_splat(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fmul_v2_v_lit_splat:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 4.0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -1873,17 +1985,31 @@ define amdgpu_kernel void @fmul_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; PACKED-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; PACKED-NEXT: s_endpgm
;
-; GFX1250-LABEL: fmul_v2_v_unfoldable_lit:
-; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
-; GFX1250-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000)
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
-; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_mul_f32 v[0:1], v[0:1], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
-; GFX1250-NEXT: s_endpgm
+; GFX1250-SDAG-LABEL: fmul_v2_v_unfoldable_lit:
+; GFX1250-SDAG: ; %bb.0:
+; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000)
+; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
+; GFX1250-SDAG-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: s_endpgm
+;
+; GFX1250-GISEL-LABEL: fmul_v2_v_unfoldable_lit:
+; GFX1250-GISEL: ; %bb.0:
+; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000)
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
%load = load <2 x float>, ptr addrspace(1) %gep, align 8
@@ -2040,12 +2166,14 @@ define amdgpu_kernel void @fma_v2_vs(ptr addrspace(1) %a, <2 x float> %x) {
; GFX1250-LABEL: fma_v2_vs:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-NEXT: v_and_b32_e32 v4, 0x3ff, v0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-NEXT: global_load_b64 v[0:1], v4, s[0:1] scale_offset
+; GFX1250-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[2:3]
-; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[2:3]
+; GFX1250-NEXT: global_store_b64 v4, v[0:1], s[0:1] scale_offset
; GFX1250-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -2104,13 +2232,16 @@ define amdgpu_kernel void @fma_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-SDAG-NEXT: s_clause 0x1
; GFX1250-SDAG-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], s[2:3]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[6:7], s[0:1]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[2:3], s[2:3]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[0:1], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[4:5], v[4:5]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[6:7], v[6:7]
+; GFX1250-SDAG-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fma_v4_vs:
@@ -2118,13 +2249,16 @@ define amdgpu_kernel void @fma_v4_vs(ptr addrspace(1) %a, <4 x float> %x) {
; GFX1250-GISEL-NEXT: s_clause 0x1
; GFX1250-GISEL-NEXT: s_load_b64 s[6:7], s[4:5], 0x24
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x34
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v4, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v8, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v4, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v8, s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[6:7], s[2:3]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[0:1], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[2:3], s[2:3]
-; GFX1250-GISEL-NEXT: global_store_b128 v4, v[0:3], s[6:7] scale_offset
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[4:5], v[4:5]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[6:7], v[6:7]
+; GFX1250-GISEL-NEXT: global_store_b128 v8, v[0:3], s[6:7] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a, i32 %id
@@ -2294,56 +2428,68 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
;
; GFX1250-SDAG-LABEL: fma_v32_vs:
; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
+; GFX1250-SDAG-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-SDAG-NEXT: v_lshlrev_b32_e32 v34, 7, v0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v32, s[0:1] offset:16
-; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v32, s[0:1]
-; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v32, s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v32, s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v32, s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v32, s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v32, s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v32, s[0:1] offset:96
-; GFX1250-SDAG-NEXT: s_clause 0x1
-; GFX1250-SDAG-NEXT: s_load_b512 s[8:23], s[4:5], 0xa4
-; GFX1250-SDAG-NEXT: s_load_b512 s[36:51], s[4:5], 0xe4
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: global_load_b128 v[28:31], v34, s[34:35] offset:16
+; GFX1250-SDAG-NEXT: global_load_b128 v[24:27], v34, s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_load_b128 v[20:23], v34, s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_load_b128 v[0:3], v34, s[34:35]
+; GFX1250-SDAG-NEXT: global_load_b128 v[4:7], v34, s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_load_b128 v[16:19], v34, s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_load_b128 v[8:11], v34, s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_load_b128 v[12:15], v34, s[34:35] offset:112
+; GFX1250-SDAG-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
+; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
+; GFX1250-SDAG-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[12:13], s[12:13]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[14:15], s[14:15]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[30:31]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[28:29]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[54:55], s[12:13]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[56:57], s[14:15]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[52:53], s[2:3]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[48:49], s[4:5]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[50:51], s[6:7]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[44:45], s[24:25]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[46:47], s[26:27]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[32:33], s[18:19]
+; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x7
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[36:37], s[8:9]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[38:39], s[10:11]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x6
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], s[10:11], s[10:11]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x4
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], s[16:17], s[16:17]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x3
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], s[40:41], s[40:41]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x2
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[18:19], v[18:19], s[38:39], s[38:39]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x1
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[24:25], v[24:25], s[48:49], s[48:49]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[26:27], v[26:27], v[42:43], v[42:43]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[42:43], s[0:1]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[24:25], v[24:25], v[40:41], v[40:41]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[40:41], s[16:17]
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[28:29], v[28:29], s[44:45], s[44:45]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[30:31], v[30:31], s[46:47], s[46:47]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[26:27], v[26:27], s[50:51], s[50:51]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[16:17], v[16:17], s[36:37], s[36:37]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], s[42:43], s[42:43]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[22:23], v[22:23], s[18:19], s[18:19]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[8:9], v[8:9], s[20:21], s[20:21]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[10:11], v[10:11], s[22:23], s[22:23]
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], s[8:9], s[8:9]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[54:55], v[54:55]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[16:17], v[16:17], v[36:37], v[36:37]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[18:19], v[18:19], v[38:39], v[38:39]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[56:57], v[56:57]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[52:53], v[52:53]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[42:43], v[42:43]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[48:49], v[48:49]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[50:51], v[50:51]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[44:45], v[44:45]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[22:23], v[22:23], v[46:47], v[46:47]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[32:33], v[32:33]
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[40:41], v[40:41]
; GFX1250-SDAG-NEXT: s_clause 0x7
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[28:31], s[0:1] offset:96
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[24:27], s[0:1] offset:112
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[16:19], s[0:1] offset:64
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[12:15], s[0:1] offset:80
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[20:23], s[0:1] offset:32
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[8:11], s[0:1] offset:48
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[4:7], s[0:1]
-; GFX1250-SDAG-NEXT: global_store_b128 v32, v[0:3], s[0:1] offset:16
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[16:19], s[34:35] offset:96
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[12:15], s[34:35] offset:112
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[8:11], s[34:35] offset:64
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[4:7], s[34:35] offset:80
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[20:23], s[34:35] offset:32
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[24:27], s[34:35] offset:48
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[0:3], s[34:35]
+; GFX1250-SDAG-NEXT: global_store_b128 v34, v[28:31], s[34:35] offset:16
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fma_v32_vs:
@@ -2351,54 +2497,70 @@ define amdgpu_kernel void @fma_v32_vs(ptr addrspace(1) %a, <32 x float> %x) {
; GFX1250-GISEL-NEXT: s_load_b64 s[34:35], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v32, 7, v0
+; GFX1250-GISEL-NEXT: v_lshlrev_b32_e32 v56, 7, v0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v32, s[34:35]
-; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v32, s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v32, s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v32, s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v32, s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v32, s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v32, s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v32, s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_load_b128 v[0:3], v56, s[34:35]
+; GFX1250-GISEL-NEXT: global_load_b128 v[4:7], v56, s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_load_b128 v[8:11], v56, s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_load_b128 v[12:15], v56, s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_load_b128 v[16:19], v56, s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_load_b128 v[20:23], v56, s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_load_b128 v[24:27], v56, s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_load_b128 v[28:31], v56, s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_load_b512 s[16:31], s[4:5], 0xa4
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_load_b512 s[0:15], s[4:5], 0xe4
-; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[16:17], s[16:17]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], s[18:19], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[16:17]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[18:19]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[20:21]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[22:23]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[40:41], s[24:25]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[42:43], s[26:27]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[44:45], s[28:29]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[46:47], s[30:31]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[48:49], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[50:51], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[52:53], s[4:5]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[54:55], s[6:7]
+; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x7
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[32:33], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[2:3], v[2:3], v[34:35], v[34:35]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[32:33], s[8:9]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[34:35], s[10:11]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x6
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], s[20:21], s[20:21]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], s[22:23], s[22:23]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[4:5], v[4:5], v[36:37], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[6:7], v[6:7], v[38:39], v[38:39]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[36:37], s[12:13]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[38:39], s[14:15]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x5
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], s[24:25], s[24:25]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], s[26:27], s[26:27]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[8:9], v[8:9], v[40:41], v[40:41]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[10:11], v[10:11], v[42:43], v[42:43]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x4
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[12:13], v[12:13], s[28:29], s[28:29]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[14:15], v[14:15], s[30:31], s[30:31]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[12:13], v[12:13], v[44:45], v[44:45]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[14:15], v[14:15], v[46:47], v[46:47]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x3
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[16:17], v[16:17], s[0:1], s[0:1]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[18:19], v[18:19], s[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[16:17], v[16:17], v[48:49], v[48:49]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[18:19], v[18:19], v[50:51], v[50:51]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x2
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[20:21], v[20:21], s[4:5], s[4:5]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[22:23], v[22:23], s[6:7], s[6:7]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[20:21], v[20:21], v[52:53], v[52:53]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[22:23], v[22:23], v[54:55], v[54:55]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x1
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[24:25], v[24:25], s[8:9], s[8:9]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[26:27], v[26:27], s[10:11], s[10:11]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[24:25], v[24:25], v[32:33], v[32:33]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[26:27], v[26:27], v[34:35], v[34:35]
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], s[12:13], s[12:13]
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], s[14:15], s[14:15]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[28:29], v[28:29], v[36:37], v[36:37]
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[30:31], v[30:31], v[38:39], v[38:39]
; GFX1250-GISEL-NEXT: s_clause 0x7
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[0:3], s[34:35]
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[4:7], s[34:35] offset:16
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[8:11], s[34:35] offset:32
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[12:15], s[34:35] offset:48
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[16:19], s[34:35] offset:64
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[20:23], s[34:35] offset:80
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[24:27], s[34:35] offset:96
-; GFX1250-GISEL-NEXT: global_store_b128 v32, v[28:31], s[34:35] offset:112
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[0:3], s[34:35]
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[4:7], s[34:35] offset:16
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[8:11], s[34:35] offset:32
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[12:15], s[34:35] offset:48
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[16:19], s[34:35] offset:64
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[20:23], s[34:35] offset:80
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[24:27], s[34:35] offset:96
+; GFX1250-GISEL-NEXT: global_store_b128 v56, v[28:31], s[34:35] offset:112
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <32 x float>, ptr addrspace(1) %a, i32 %id
@@ -2488,17 +2650,19 @@ define amdgpu_kernel void @fma_v2_v_imm(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fma_v2_v_imm:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 0x42c80000
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_mov_b32 s4, 0x43480000
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
; GFX1250-GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5]
+; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -2653,17 +2817,19 @@ define amdgpu_kernel void @fma_v2_v_lit_splat(ptr addrspace(1) %a) {
; GFX1250-GISEL-LABEL: fma_v2_v_lit_splat:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b32 s2, 4.0
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_mov_b32 s4, 1.0
; GFX1250-GISEL-NEXT: s_mov_b32 s3, s2
; GFX1250-GISEL-NEXT: s_mov_b32 s5, s4
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5]
+; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -2740,29 +2906,30 @@ define amdgpu_kernel void @fma_v2_v_unfoldable_lit(ptr addrspace(1) %a) {
; GFX1250-SDAG-LABEL: fma_v2_v_unfoldable_lit:
; GFX1250-SDAG: ; %bb.0:
; GFX1250-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-SDAG-NEXT: v_and_b32_e32 v2, 0x3ff, v0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[2:3], lit64(0x400000003f800000)
-; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
-; GFX1250-SDAG-NEXT: s_mov_b64 s[4:5], lit64(0x4040000040800000)
+; GFX1250-SDAG-NEXT: v_and_b32_e32 v6, 0x3ff, v0
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[2:3], lit64(0x4040000040800000)
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[4:5], lit64(0x400000003f800000)
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[4:5], s[2:3]
-; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-SDAG-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5]
+; GFX1250-SDAG-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fma_v2_v_unfoldable_lit:
; GFX1250-GISEL: ; %bb.0:
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
-; GFX1250-GISEL-NEXT: v_and_b32_e32 v2, 0x3ff, v0
+; GFX1250-GISEL-NEXT: v_and_b32_e32 v6, 0x3ff, v0
; GFX1250-GISEL-NEXT: s_mov_b64 s[2:3], lit64(0x4040000040800000)
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_mov_b64 s[4:5], lit64(0x400000003f800000)
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[4:5], s[4:5]
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v2, s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: global_load_b64 v[0:1], v6, s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], s[2:3], s[4:5]
-; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1] scale_offset
+; GFX1250-GISEL-NEXT: v_pk_fma_f32 v[0:1], v[0:1], v[2:3], v[4:5]
+; GFX1250-GISEL-NEXT: global_store_b64 v6, v[0:1], s[0:1] scale_offset
; GFX1250-GISEL-NEXT: s_endpgm
%id = tail call i32 @llvm.amdgcn.workitem.id.x()
%gep = getelementptr inbounds <2 x float>, ptr addrspace(1) %a, i32 %id
@@ -3268,20 +3435,22 @@ define amdgpu_kernel void @fadd_fadd_fsub_0(<2 x float> %arg) {
; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
; GFX1250-SDAG-NEXT: s_add_f32 s1, s1, 0
; GFX1250-SDAG-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GFX1250-SDAG-NEXT: flat_store_b64 v[0:1], v[0:1]
+; GFX1250-SDAG-NEXT: flat_store_b64 v[0:1], v[0:1] scope:SCOPE_SE
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_fadd_fsub_0:
; GFX1250-GISEL: ; %bb.0: ; %bb
; GFX1250-GISEL-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], 0
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, s0
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v1
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v0, v1
; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], 0
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1250-GISEL-NEXT: v_mov_b32_e32 v3, v0
-; GFX1250-GISEL-NEXT: flat_store_b64 v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: flat_store_b64 v[0:1], v[2:3] scope:SCOPE_SE
; GFX1250-GISEL-NEXT: s_endpgm
bb:
%i12 = fadd <2 x float> zeroinitializer, %arg
@@ -3363,15 +3532,16 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p
; GFX1250-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1250-SDAG-NEXT: s_wait_xcnt 0x0
; GFX1250-SDAG-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
-; GFX1250-SDAG-NEXT: v_mov_b32_e32 v4, 0
; GFX1250-SDAG-NEXT: s_wait_kmcnt 0x0
-; GFX1250-SDAG-NEXT: s_add_f32 s6, s1, s3
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], s[2:3], s[6:7] op_sel_hi:[1,0]
-; GFX1250-SDAG-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v3, v0
-; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[2:3], s[2:3] neg_lo:[0,1] neg_hi:[0,1]
-; GFX1250-SDAG-NEXT: global_store_b64 v4, v[0:1], s[4:5]
+; GFX1250-SDAG-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-SDAG-NEXT: s_add_f32 s2, s1, s3
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_3)
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[2:3], v[0:1], s[2:3] op_sel_hi:[1,0]
+; GFX1250-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX1250-SDAG-NEXT: v_dual_mov_b32 v4, s0 :: v_dual_mov_b32 v5, v2
+; GFX1250-SDAG-NEXT: v_mov_b32_e32 v2, 0
+; GFX1250-SDAG-NEXT: v_pk_add_f32 v[0:1], v[4:5], v[0:1] neg_lo:[0,1] neg_hi:[0,1]
+; GFX1250-SDAG-NEXT: global_store_b64 v2, v[0:1], s[4:5]
; GFX1250-SDAG-NEXT: s_endpgm
;
; GFX1250-GISEL-LABEL: fadd_fadd_fsub:
@@ -3380,13 +3550,16 @@ define amdgpu_kernel void @fadd_fadd_fsub(<2 x float> %arg, <2 x float> %arg1, p
; GFX1250-GISEL-NEXT: s_wait_xcnt 0x0
; GFX1250-GISEL-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[2:3], s[2:3]
; GFX1250-GISEL-NEXT: s_sub_f32 s0, s0, s2
-; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_3)
-; GFX1250-GISEL-NEXT: v_dual_mov_b32 v0, v1 :: v_dual_mov_b32 v2, s0
; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], s[2:3], v[0:1]
-; GFX1250-GISEL-NEXT: v_dual_subrev_f32 v3, s3, v0 :: v_dual_mov_b32 v0, 0
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[0:1], v[2:3]
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, v1
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_pk_add_f32 v[0:1], v[2:3], v[0:1]
+; GFX1250-GISEL-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_subrev_f32 v3, s3, v0
+; GFX1250-GISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX1250-GISEL-NEXT: global_store_b64 v0, v[2:3], s[4:5]
; GFX1250-GISEL-NEXT: s_endpgm
bb:
@@ -3593,7 +3766,9 @@ define amdgpu_kernel void @fneg_v2f32_scalar(ptr addrspace(1) %a, <2 x float> %x
; GFX1250-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1250-GISEL-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], 1.0, s[2:3] op_sel_hi:[0,1] neg_lo:[0,1] neg_hi:[0,1]
+; GFX1250-GISEL-NEXT: v_mov_b64_e32 v[0:1], s[2:3]
+; GFX1250-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-GISEL-NEXT: v_pk_mul_f32 v[0:1], 1.0, v[0:1] op_sel_hi:[0,1] neg_lo:[0,1] neg_hi:[0,1]
; GFX1250-GISEL-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX1250-GISEL-NEXT: s_endpgm
%fneg = fsub <2 x float> <float -0.0, float -0.0>, %x
diff --git a/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll b/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll
index 131c5f3..f67cbe3 100644
--- a/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll
+++ b/llvm/test/CodeGen/AMDGPU/readcyclecounter.ll
@@ -10,6 +10,8 @@
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -amdgpu-enable-vopd=0 < %s | FileCheck -check-prefixes=GETREG,GETREG-GISEL -check-prefix=GCN %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GCN,GFX12 %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 < %s | FileCheck -check-prefixes=GCN,GFX12 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250 %s
declare i64 @llvm.readcyclecounter() #0
@@ -21,6 +23,7 @@ declare i64 @llvm.readcyclecounter() #0
; GFX12: s_getreg_b32 [[HI2:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI)
; GFX12: s_cmp_eq_u32 [[HI1]], [[HI2]]
; GFX12: s_cselect_b32 {{s[0-9]+}}, [[LO1]], 0
+; GFX1250: s_get_shader_cycles_u64 s{{\[[0-9]+:[0-9]+\]}}
; GCN-DAG: kmcnt
; MEMTIME: store_dwordx2
; SIVI-NOT: kmcnt
@@ -53,6 +56,7 @@ define amdgpu_kernel void @test_readcyclecounter(ptr addrspace(1) %out) #0 {
; GFX12: s_getreg_b32 [[HI1:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI)
; GFX12: s_getreg_b32 [[LO1:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_LO)
; GFX12: s_getreg_b32 [[HI2:s[0-9]+]], hwreg(HW_REG_SHADER_CYCLES_HI)
+; GFX1250: s_get_shader_cycles_u64 s{{\[[0-9]+:[0-9]+\]}}
; GCN-DAG: s_load_{{dword|b32|b64}}
; GETREG-DAG: s_getreg_b32 s{{[0-9]+}}, hwreg(HW_REG_SHADER_CYCLES, 0, 20)
; GFX12: s_cmp_eq_u32 [[HI1]], [[HI2]]
diff --git a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
index 0c6339e..0b43ff2 100644
--- a/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/rewrite-vgpr-mfma-to-agpr.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mcpu=gfx90a < %s | FileCheck %s
+; RUN: llc -mcpu=gfx942 -amdgpu-mfma-vgpr-form < %s | FileCheck %s
target triple = "amdgcn-amd-amdhsa"
@@ -7,7 +7,10 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp
; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0
+; CHECK-NEXT: v_mov_b32_e32 v32, 1.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 2.0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: global_load_dwordx4 v[28:31], v0, s[0:1] offset:112
; CHECK-NEXT: global_load_dwordx4 v[24:27], v0, s[0:1] offset:96
@@ -18,117 +21,58 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma(ptr addrsp
; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[0:1] offset:16
; CHECK-NEXT: s_nop 0
; CHECK-NEXT: global_load_dwordx4 v[0:3], v0, s[0:1]
+; CHECK-NEXT: v_accvgpr_write_b32 a0, 1.0
+; CHECK-NEXT: v_accvgpr_write_b32 a1, 2.0
; CHECK-NEXT: s_waitcnt vmcnt(0)
-; CHECK-NEXT: v_accvgpr_write_b32 a0, v0
-; CHECK-NEXT: v_accvgpr_write_b32 a1, v1
-; CHECK-NEXT: v_accvgpr_write_b32 a2, v2
-; CHECK-NEXT: v_accvgpr_write_b32 a3, v3
-; CHECK-NEXT: v_accvgpr_write_b32 a4, v4
-; CHECK-NEXT: v_accvgpr_write_b32 a5, v5
-; CHECK-NEXT: v_accvgpr_write_b32 a6, v6
-; CHECK-NEXT: v_accvgpr_write_b32 a7, v7
-; CHECK-NEXT: v_accvgpr_write_b32 a8, v8
-; CHECK-NEXT: v_accvgpr_write_b32 a9, v9
-; CHECK-NEXT: v_accvgpr_write_b32 a10, v10
-; CHECK-NEXT: v_accvgpr_write_b32 a11, v11
-; CHECK-NEXT: v_accvgpr_write_b32 a12, v12
-; CHECK-NEXT: v_accvgpr_write_b32 a13, v13
-; CHECK-NEXT: v_accvgpr_write_b32 a14, v14
-; CHECK-NEXT: v_accvgpr_write_b32 a15, v15
-; CHECK-NEXT: v_accvgpr_write_b32 a16, v16
-; CHECK-NEXT: v_accvgpr_write_b32 a17, v17
-; CHECK-NEXT: v_accvgpr_write_b32 a18, v18
-; CHECK-NEXT: v_accvgpr_write_b32 a19, v19
-; CHECK-NEXT: v_accvgpr_write_b32 a20, v20
-; CHECK-NEXT: v_accvgpr_write_b32 a21, v21
-; CHECK-NEXT: v_accvgpr_write_b32 a22, v22
-; CHECK-NEXT: v_accvgpr_write_b32 a23, v23
-; CHECK-NEXT: v_accvgpr_write_b32 a24, v24
-; CHECK-NEXT: v_accvgpr_write_b32 a25, v25
-; CHECK-NEXT: v_accvgpr_write_b32 a26, v26
-; CHECK-NEXT: v_accvgpr_write_b32 a27, v27
-; CHECK-NEXT: v_accvgpr_write_b32 a28, v28
-; CHECK-NEXT: v_accvgpr_write_b32 a29, v29
-; CHECK-NEXT: v_accvgpr_write_b32 a30, v30
-; CHECK-NEXT: v_accvgpr_write_b32 a31, v31
-; CHECK-NEXT: v_mov_b32_e32 v0, 1.0
-; CHECK-NEXT: v_mov_b32_e32 v1, 2.0
-; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[32:63], v0, v1, a[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[32:63], a0, a1, v[0:31]
; CHECK-NEXT: s_nop 7
; CHECK-NEXT: s_nop 7
-; CHECK-NEXT: s_nop 2
-; CHECK-NEXT: v_accvgpr_read_b32 v4, a59
-; CHECK-NEXT: v_accvgpr_read_b32 v5, a58
-; CHECK-NEXT: v_accvgpr_read_b32 v6, a57
-; CHECK-NEXT: v_accvgpr_read_b32 v7, a56
-; CHECK-NEXT: v_accvgpr_read_b32 v8, a55
-; CHECK-NEXT: v_accvgpr_read_b32 v9, a54
-; CHECK-NEXT: v_accvgpr_read_b32 v10, a53
-; CHECK-NEXT: v_accvgpr_read_b32 v11, a52
-; CHECK-NEXT: v_accvgpr_read_b32 v12, a51
-; CHECK-NEXT: v_accvgpr_read_b32 v13, a50
-; CHECK-NEXT: v_accvgpr_read_b32 v14, a49
-; CHECK-NEXT: v_accvgpr_read_b32 v15, a48
-; CHECK-NEXT: v_accvgpr_read_b32 v16, a47
-; CHECK-NEXT: v_accvgpr_read_b32 v17, a46
-; CHECK-NEXT: v_accvgpr_read_b32 v18, a45
-; CHECK-NEXT: v_accvgpr_read_b32 v19, a44
-; CHECK-NEXT: v_accvgpr_read_b32 v20, a43
-; CHECK-NEXT: v_accvgpr_read_b32 v21, a42
-; CHECK-NEXT: v_accvgpr_read_b32 v22, a41
-; CHECK-NEXT: v_accvgpr_read_b32 v23, a40
-; CHECK-NEXT: v_accvgpr_read_b32 v24, a39
-; CHECK-NEXT: v_accvgpr_read_b32 v25, a38
-; CHECK-NEXT: v_accvgpr_read_b32 v26, a37
-; CHECK-NEXT: v_accvgpr_read_b32 v27, a36
-; CHECK-NEXT: v_accvgpr_read_b32 v28, a35
-; CHECK-NEXT: v_accvgpr_read_b32 v29, a34
-; CHECK-NEXT: v_accvgpr_mov_b32 a2, a32
-; CHECK-NEXT: v_accvgpr_mov_b32 a3, a33
-; CHECK-NEXT: v_accvgpr_write_b32 a4, v29
-; CHECK-NEXT: v_accvgpr_write_b32 a5, v28
-; CHECK-NEXT: v_accvgpr_write_b32 a6, v27
-; CHECK-NEXT: v_accvgpr_write_b32 a7, v26
-; CHECK-NEXT: v_accvgpr_write_b32 a8, v25
-; CHECK-NEXT: v_accvgpr_write_b32 a9, v24
-; CHECK-NEXT: v_accvgpr_write_b32 a10, v23
-; CHECK-NEXT: v_accvgpr_write_b32 a11, v22
-; CHECK-NEXT: v_accvgpr_write_b32 a12, v21
-; CHECK-NEXT: v_accvgpr_write_b32 a13, v20
-; CHECK-NEXT: v_accvgpr_write_b32 a14, v19
-; CHECK-NEXT: v_accvgpr_write_b32 a15, v18
-; CHECK-NEXT: v_accvgpr_write_b32 a16, v17
-; CHECK-NEXT: v_accvgpr_write_b32 a17, v16
-; CHECK-NEXT: v_accvgpr_write_b32 a18, v15
-; CHECK-NEXT: v_accvgpr_write_b32 a19, v14
-; CHECK-NEXT: v_accvgpr_write_b32 a20, v13
-; CHECK-NEXT: v_accvgpr_write_b32 a21, v12
-; CHECK-NEXT: v_accvgpr_write_b32 a22, v11
-; CHECK-NEXT: v_accvgpr_write_b32 a23, v10
-; CHECK-NEXT: v_accvgpr_write_b32 a24, v9
-; CHECK-NEXT: v_accvgpr_write_b32 a25, v8
-; CHECK-NEXT: v_accvgpr_write_b32 a26, v7
-; CHECK-NEXT: v_accvgpr_write_b32 a27, v6
-; CHECK-NEXT: v_accvgpr_write_b32 a28, v5
-; CHECK-NEXT: v_accvgpr_write_b32 a29, v4
-; CHECK-NEXT: v_accvgpr_mov_b32 a30, a60
-; CHECK-NEXT: v_accvgpr_mov_b32 a31, a61
; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v2, v32
+; CHECK-NEXT: v_mov_b32_e32 v3, v33
+; CHECK-NEXT: v_mov_b32_e32 v4, v34
+; CHECK-NEXT: v_mov_b32_e32 v5, v35
+; CHECK-NEXT: v_mov_b32_e32 v6, v36
+; CHECK-NEXT: v_mov_b32_e32 v7, v37
+; CHECK-NEXT: v_mov_b32_e32 v8, v38
+; CHECK-NEXT: v_mov_b32_e32 v9, v39
+; CHECK-NEXT: v_mov_b32_e32 v10, v40
+; CHECK-NEXT: v_mov_b32_e32 v11, v41
+; CHECK-NEXT: v_mov_b32_e32 v12, v42
+; CHECK-NEXT: v_mov_b32_e32 v13, v43
+; CHECK-NEXT: v_mov_b32_e32 v14, v44
+; CHECK-NEXT: v_mov_b32_e32 v15, v45
+; CHECK-NEXT: v_mov_b32_e32 v16, v46
+; CHECK-NEXT: v_mov_b32_e32 v17, v47
+; CHECK-NEXT: v_mov_b32_e32 v18, v48
+; CHECK-NEXT: v_mov_b32_e32 v19, v49
+; CHECK-NEXT: v_mov_b32_e32 v20, v50
+; CHECK-NEXT: v_mov_b32_e32 v21, v51
+; CHECK-NEXT: v_mov_b32_e32 v22, v52
+; CHECK-NEXT: v_mov_b32_e32 v23, v53
+; CHECK-NEXT: v_mov_b32_e32 v24, v54
+; CHECK-NEXT: v_mov_b32_e32 v25, v55
+; CHECK-NEXT: v_mov_b32_e32 v26, v56
+; CHECK-NEXT: v_mov_b32_e32 v27, v57
+; CHECK-NEXT: v_mov_b32_e32 v28, v58
+; CHECK-NEXT: v_mov_b32_e32 v29, v59
+; CHECK-NEXT: v_mov_b32_e32 v30, v60
+; CHECK-NEXT: v_mov_b32_e32 v31, v61
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], a0, a1, v[0:31]
; CHECK-NEXT: s_nop 7
; CHECK-NEXT: s_nop 7
; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[0:1] offset:96
-; CHECK-NEXT: global_store_dwordx4 v0, a[28:31], s[0:1] offset:112
-; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[0:1] offset:64
-; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[0:1] offset:80
-; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32
-; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48
-; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1]
-; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
; CHECK-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
@@ -146,35 +90,36 @@ define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle(
; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_noshuffle:
; CHECK: ; %bb.0: ; %bb
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 7, v0
-; CHECK-NEXT: v_mov_b32_e32 v1, 2.0
+; CHECK-NEXT: v_mov_b32_e32 v32, 1.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 2.0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: global_load_dwordx4 a[28:31], v0, s[0:1] offset:112
-; CHECK-NEXT: global_load_dwordx4 a[24:27], v0, s[0:1] offset:96
-; CHECK-NEXT: global_load_dwordx4 a[20:23], v0, s[0:1] offset:80
-; CHECK-NEXT: global_load_dwordx4 a[16:19], v0, s[0:1] offset:64
-; CHECK-NEXT: global_load_dwordx4 a[12:15], v0, s[0:1] offset:48
-; CHECK-NEXT: global_load_dwordx4 a[8:11], v0, s[0:1] offset:32
-; CHECK-NEXT: global_load_dwordx4 a[4:7], v0, s[0:1] offset:16
-; CHECK-NEXT: global_load_dwordx4 a[0:3], v0, s[0:1]
-; CHECK-NEXT: v_mov_b32_e32 v0, 1.0
-; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: global_load_dwordx4 v[28:31], v0, s[0:1] offset:112
+; CHECK-NEXT: global_load_dwordx4 v[24:27], v0, s[0:1] offset:96
+; CHECK-NEXT: global_load_dwordx4 v[20:23], v0, s[0:1] offset:80
+; CHECK-NEXT: global_load_dwordx4 v[16:19], v0, s[0:1] offset:64
+; CHECK-NEXT: global_load_dwordx4 v[12:15], v0, s[0:1] offset:48
+; CHECK-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] offset:32
+; CHECK-NEXT: global_load_dwordx4 v[4:7], v0, s[0:1] offset:16
; CHECK-NEXT: s_nop 0
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mfma_f32_32x32x1f32 a[0:31], v0, v1, a[0:31]
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: global_load_dwordx4 v[0:3], v0, s[0:1]
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
; CHECK-NEXT: s_nop 7
; CHECK-NEXT: s_nop 7
-; CHECK-NEXT: s_nop 1
-; CHECK-NEXT: global_store_dwordx4 v0, a[24:27], s[0:1] offset:96
-; CHECK-NEXT: global_store_dwordx4 v0, a[28:31], s[0:1] offset:112
-; CHECK-NEXT: global_store_dwordx4 v0, a[16:19], s[0:1] offset:64
-; CHECK-NEXT: global_store_dwordx4 v0, a[20:23], s[0:1] offset:80
-; CHECK-NEXT: global_store_dwordx4 v0, a[8:11], s[0:1] offset:32
-; CHECK-NEXT: global_store_dwordx4 v0, a[12:15], s[0:1] offset:48
-; CHECK-NEXT: global_store_dwordx4 v0, a[0:3], s[0:1]
-; CHECK-NEXT: global_store_dwordx4 v0, a[4:7], s[0:1] offset:16
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
; CHECK-NEXT: s_endpgm
bb:
%id = call i32 @llvm.amdgcn.workitem.id.x()
@@ -187,9 +132,77 @@ bb:
ret void
}
+define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm0_src2(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm0_src2:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: v_mov_b32_e32 v32, 1.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 2.0
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, 0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
+ %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128
+ %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> zeroinitializer, i32 0, i32 0, i32 0)
+ %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0)
+ %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0)
+ store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128
+ ret void
+}
+
+define amdgpu_kernel void @test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm1_src2(ptr addrspace(1) %arg) #0 {
+; CHECK-LABEL: test_mfma_f32_32x32x1f32_rewrite_vgpr_mfma_imm1_src2:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: v_mov_b32_e32 v32, 1.0
+; CHECK-NEXT: v_mov_b32_e32 v33, 2.0
+; CHECK-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; CHECK-NEXT: s_nop 0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, 1.0
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mfma_f32_32x32x1_2b_f32 v[0:31], v32, v33, v[0:31]
+; CHECK-NEXT: v_mov_b32_e32 v32, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: s_nop 7
+; CHECK-NEXT: global_store_dwordx4 v32, v[28:31], s[0:1] offset:112
+; CHECK-NEXT: global_store_dwordx4 v32, v[24:27], s[0:1] offset:96
+; CHECK-NEXT: global_store_dwordx4 v32, v[20:23], s[0:1] offset:80
+; CHECK-NEXT: global_store_dwordx4 v32, v[16:19], s[0:1] offset:64
+; CHECK-NEXT: global_store_dwordx4 v32, v[12:15], s[0:1] offset:48
+; CHECK-NEXT: global_store_dwordx4 v32, v[8:11], s[0:1] offset:32
+; CHECK-NEXT: global_store_dwordx4 v32, v[4:7], s[0:1] offset:16
+; CHECK-NEXT: global_store_dwordx4 v32, v[0:3], s[0:1]
+; CHECK-NEXT: s_endpgm
+bb:
+ %id = call i32 @llvm.amdgcn.workitem.id.x()
+ %gep = getelementptr <32 x float>, ptr addrspace(1) %arg, i32 %id
+ %in.1 = load <32 x float>, ptr addrspace(1) %gep, align 128
+ %mai.1 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> splat (float 1.0), i32 0, i32 0, i32 0)
+ %mai.2 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.1, i32 0, i32 0, i32 0)
+ %mai.3 = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float 1.0, float 2.0, <32 x float> %mai.2, i32 0, i32 0, i32 0)
+ store <32 x float> %mai.3, ptr addrspace(1) %arg, align 128
+ ret void
+}
+
declare <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float, float, <32 x float>, i32 immarg, i32 immarg, i32 immarg) #1
declare noundef i32 @llvm.amdgcn.workitem.id.x() #2
-attributes #0 = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "amdgpu-waves-per-eu"="4,4" }
+attributes #0 = { nounwind "amdgpu-flat-work-group-size"="1,256" "amdgpu-waves-per-eu"="4,4" }
attributes #1 = { convergent nocallback nofree nosync nounwind willreturn memory(none) }
attributes #2 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
diff --git a/llvm/test/CodeGen/AMDGPU/saddsat.ll b/llvm/test/CodeGen/AMDGPU/saddsat.ll
index 019eb2c..4995ce6 100644
--- a/llvm/test/CodeGen/AMDGPU/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/saddsat.ll
@@ -124,9 +124,8 @@ define i32 @v_saddsat_i32(i32 %lhs, i32 %rhs) {
; GFX6-NEXT: v_add_i32_e64 v1, s[4:5], v0, v1
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v1
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddsat_i32:
@@ -136,9 +135,8 @@ define i32 @v_saddsat_i32(i32 %lhs, i32 %rhs) {
; GFX8-NEXT: v_add_u32_e64 v1, s[4:5], v0, v1
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v1
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddsat_i32:
@@ -383,16 +381,14 @@ define <2 x i32> @v_saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; GFX6-NEXT: v_add_i32_e64 v2, s[4:5], v0, v2
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v2
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5]
; GFX6-NEXT: v_add_i32_e64 v2, s[4:5], v1, v3
; GFX6-NEXT: v_cmp_gt_i32_e32 vcc, 0, v3
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v2
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddsat_v2i32:
@@ -402,16 +398,14 @@ define <2 x i32> @v_saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; GFX8-NEXT: v_add_u32_e64 v2, s[4:5], v0, v2
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v2
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5]
; GFX8-NEXT: v_add_u32_e64 v2, s[4:5], v1, v3
; GFX8-NEXT: v_cmp_gt_i32_e32 vcc, 0, v3
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v2
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddsat_v2i32:
@@ -442,8 +436,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_saddsat_i64:
@@ -456,8 +449,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_saddsat_i64:
@@ -470,8 +462,7 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX9-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_saddsat_i64:
@@ -480,12 +471,11 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2
; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v1, v3, vcc_lo
; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[2:3]
-; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX10-NEXT: v_xor_b32_e32 v1, 0x80000000, v6
+; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_saddsat_i64:
@@ -494,11 +484,11 @@ define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v0, v2
; GFX11-NEXT: v_add_co_ci_u32_e64 v5, null, v1, v3, vcc_lo
; GFX11-NEXT: v_cmp_gt_i64_e64 s0, 0, v[2:3]
-; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX11-NEXT: v_xor_b32_e32 v1, 0x80000000, v6
+; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.sadd.sat.i64(i64 %lhs, i64 %rhs)
ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/ssubsat.ll
index 40d80f5..09c0e77 100644
--- a/llvm/test/CodeGen/AMDGPU/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/ssubsat.ll
@@ -124,9 +124,8 @@ define i32 @v_ssubsat_i32(i32 %lhs, i32 %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v1, s[4:5], v0, v1
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v1
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_i32:
@@ -136,9 +135,8 @@ define i32 @v_ssubsat_i32(i32 %lhs, i32 %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v1, s[4:5], v0, v1
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v1, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v1
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, -v0, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_i32:
@@ -383,16 +381,14 @@ define <2 x i32> @v_ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v2, s[4:5], v0, v2
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v2
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v2, s[4:5], v1, v3
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v3
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v2
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v2i32:
@@ -402,16 +398,14 @@ define <2 x i32> @v_ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v2, s[4:5], v0, v2
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v2
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v2, s[4:5], v1, v3
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v3
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v2
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, -v1, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v2i32:
@@ -439,23 +433,20 @@ define <3 x i32> @v_ssubsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v0, v3
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v3, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v1, v4
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v4
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v3
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, -v1, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v3, s[4:5], v2, v5
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v2
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v3
-; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v3, -v2, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v3i32:
@@ -465,23 +456,20 @@ define <3 x i32> @v_ssubsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v0, v3
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v3
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v1, v4
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v4
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v3
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, -v1, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v3, s[4:5], v2, v5
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v2
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v3
-; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v3, -v2, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v3i32:
@@ -511,30 +499,26 @@ define <4 x i32> @v_ssubsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v0, v4
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v4
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v4, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v1, v5
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v4
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v4, -v1, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v2, v6
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v6
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v2
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v4
-; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v4, -v2, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v4, s[4:5], v3, v7
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v7
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v3
; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v4
-; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v4, -v3, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v4i32:
@@ -544,30 +528,26 @@ define <4 x i32> @v_ssubsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v0, v4
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v4
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v1, v5
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v5
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v4
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v4, -v1, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v2, v6
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v6
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v2
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v4
-; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v4, -v2, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v4, s[4:5], v3, v7
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v7
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v4, v3
; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v4
-; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v4, v3, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v4, -v3, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v4i32:
@@ -599,58 +579,50 @@ define <8 x i32> @v_ssubsat_v8i32(<8 x i32> %lhs, <8 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v0, v8
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v8, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v1, v9
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v9
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v8, -v1, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v2, v10
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v10
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v2
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v8, -v2, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v3, v11
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v11
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v3
; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v8, -v3, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v4, v12
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v12
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v4
; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v4, v8, -v4, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v5, v13
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v13
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v5
; GFX6-NEXT: v_ashrrev_i32_e32 v5, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v5, 0x80000000, v5
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v5, v8, -v5, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v6, v14
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v14
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v6
; GFX6-NEXT: v_ashrrev_i32_e32 v6, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v6, 0x80000000, v6
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v6, v8, -v6, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v8, s[4:5], v7, v15
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v15
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v7
; GFX6-NEXT: v_ashrrev_i32_e32 v7, 31, v8
-; GFX6-NEXT: v_xor_b32_e32 v7, 0x80000000, v7
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v7, v8, -v7, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v8i32:
@@ -660,58 +632,50 @@ define <8 x i32> @v_ssubsat_v8i32(<8 x i32> %lhs, <8 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v0, v8
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v8, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v1, v9
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v9
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v8, -v1, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v2, v10
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v10
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v2
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v8, -v2, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v3, v11
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v11
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v3
; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v8, v3, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v8, -v3, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v4, v12
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v12
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v4
; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v8, -v4, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v5, v13
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v13
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v5
; GFX8-NEXT: v_ashrrev_i32_e32 v5, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v5, 0x80000000, v5
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v8, v5, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v8, -v5, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v6, v14
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v14
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v6
; GFX8-NEXT: v_ashrrev_i32_e32 v6, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v6, 0x80000000, v6
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v8, v6, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v8, -v6, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v8, s[4:5], v7, v15
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v15
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v8, v7
; GFX8-NEXT: v_ashrrev_i32_e32 v7, 31, v8
-; GFX8-NEXT: v_xor_b32_e32 v7, 0x80000000, v7
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v8, v7, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v8, -v7, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v8i32:
@@ -751,116 +715,100 @@ define <16 x i32> @v_ssubsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) {
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v0, v16
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v0
; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v16, -v0, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v1, v17
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v17
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v1
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v16, v1, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v16, -v1, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v2, v18
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v18
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v2
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v16, -v2, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v3, v19
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v19
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v3
; GFX6-NEXT: v_ashrrev_i32_e32 v3, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v3, v16, v3, vcc
-; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v4, v20
-; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20
-; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v4
-; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v4, v16, v4, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v16, -v3, s[4:5]
; GFX6-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v4, v20
+; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20
+; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v4
+; GFX6-NEXT: v_ashrrev_i32_e32 v4, 31, v17
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v4, v17, -v4, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v5, v21
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v21
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v5
; GFX6-NEXT: v_ashrrev_i32_e32 v5, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v5, 0x80000000, v5
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v5, v17, v5, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v5, v17, -v5, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v6, v22
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v22
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v6
; GFX6-NEXT: v_ashrrev_i32_e32 v6, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v6, 0x80000000, v6
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v6, v17, v6, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v6, v17, -v6, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v7, v23
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v23
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v7
; GFX6-NEXT: v_ashrrev_i32_e32 v7, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v7, 0x80000000, v7
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v7, v17, v7, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v7, v17, -v7, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v8, v24
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v24
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v8
; GFX6-NEXT: v_ashrrev_i32_e32 v8, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v8, 0x80000000, v8
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v8, v17, v8, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v8, v17, -v8, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v9, v25
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v25
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v9
; GFX6-NEXT: v_ashrrev_i32_e32 v9, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v9, 0x80000000, v9
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v9, v17, v9, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v9, v17, -v9, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v10, v26
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v26
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v10
; GFX6-NEXT: v_ashrrev_i32_e32 v10, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v10, 0x80000000, v10
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v10, v17, v10, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v10, v17, -v10, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v11, v27
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v27
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v11
; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v11, 0x80000000, v11
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v11, v17, v11, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v11, v17, -v11, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v12, v28
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v28
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v12
; GFX6-NEXT: v_ashrrev_i32_e32 v12, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v12, 0x80000000, v12
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v12, v17, v12, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v12, v17, -v12, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v13, v29
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v29
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v13
; GFX6-NEXT: v_ashrrev_i32_e32 v13, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v13, 0x80000000, v13
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v13, v17, v13, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v13, v17, -v13, s[4:5]
; GFX6-NEXT: v_sub_i32_e64 v17, s[4:5], v14, v30
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v30
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v14
; GFX6-NEXT: v_ashrrev_i32_e32 v14, 31, v17
-; GFX6-NEXT: v_xor_b32_e32 v14, 0x80000000, v14
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v14, v17, v14, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v14, v17, -v14, s[4:5]
; GFX6-NEXT: s_waitcnt vmcnt(0)
; GFX6-NEXT: v_cmp_lt_i32_e32 vcc, 0, v16
; GFX6-NEXT: v_sub_i32_e64 v16, s[4:5], v15, v16
; GFX6-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v15
; GFX6-NEXT: v_ashrrev_i32_e32 v15, 31, v16
-; GFX6-NEXT: v_xor_b32_e32 v15, 0x80000000, v15
-; GFX6-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc
+; GFX6-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v15, v16, -v15, s[4:5]
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_v16i32:
@@ -870,116 +818,100 @@ define <16 x i32> @v_ssubsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) {
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v0, v16
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v0
; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v16, -v0, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v1, v17
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v17
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v1
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v16, v1, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v16, -v1, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v2, v18
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v18
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v2
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v2, v16, v2, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v16, -v2, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v3, v19
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v19
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v3
; GFX8-NEXT: v_ashrrev_i32_e32 v3, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v3, v16, v3, vcc
-; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v4, v20
-; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20
-; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v4
-; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v4, v16, v4, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v16, -v3, s[4:5]
; GFX8-NEXT: buffer_load_dword v16, off, s[0:3], s32
+; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v4, v20
+; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v20
+; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v4
+; GFX8-NEXT: v_ashrrev_i32_e32 v4, 31, v17
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v17, -v4, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v5, v21
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v21
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v5
; GFX8-NEXT: v_ashrrev_i32_e32 v5, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v5, 0x80000000, v5
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v5, v17, v5, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v17, -v5, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v6, v22
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v22
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v6
; GFX8-NEXT: v_ashrrev_i32_e32 v6, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v6, 0x80000000, v6
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v6, v17, v6, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v17, -v6, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v7, v23
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v23
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v7
; GFX8-NEXT: v_ashrrev_i32_e32 v7, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v7, 0x80000000, v7
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v7, v17, v7, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v17, -v7, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v8, v24
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v24
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v8
; GFX8-NEXT: v_ashrrev_i32_e32 v8, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v8, 0x80000000, v8
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v8, v17, v8, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v8, v17, -v8, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v9, v25
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v25
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v9
; GFX8-NEXT: v_ashrrev_i32_e32 v9, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v9, 0x80000000, v9
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v9, v17, v9, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v9, v17, -v9, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v10, v26
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v26
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v10
; GFX8-NEXT: v_ashrrev_i32_e32 v10, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v10, 0x80000000, v10
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v10, v17, v10, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v10, v17, -v10, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v11, v27
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v27
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v11
; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v11, 0x80000000, v11
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v11, v17, v11, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v11, v17, -v11, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v12, v28
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v28
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v12
; GFX8-NEXT: v_ashrrev_i32_e32 v12, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v12, 0x80000000, v12
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v12, v17, v12, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v12, v17, -v12, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v13, v29
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v29
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v13
; GFX8-NEXT: v_ashrrev_i32_e32 v13, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v13, 0x80000000, v13
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v13, v17, v13, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v13, v17, -v13, s[4:5]
; GFX8-NEXT: v_sub_u32_e64 v17, s[4:5], v14, v30
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v30
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v17, v14
; GFX8-NEXT: v_ashrrev_i32_e32 v14, 31, v17
-; GFX8-NEXT: v_xor_b32_e32 v14, 0x80000000, v14
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v14, v17, v14, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v14, v17, -v14, s[4:5]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_cmp_lt_i32_e32 vcc, 0, v16
; GFX8-NEXT: v_sub_u32_e64 v16, s[4:5], v15, v16
; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v16, v15
; GFX8-NEXT: v_ashrrev_i32_e32 v15, 31, v16
-; GFX8-NEXT: v_xor_b32_e32 v15, 0x80000000, v15
-; GFX8-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GFX8-NEXT: v_cndmask_b32_e32 v15, v16, v15, vcc
+; GFX8-NEXT: s_xor_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v15, v16, -v15, s[4:5]
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_v16i32:
@@ -1066,8 +998,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX6-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX6-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_ssubsat_i64:
@@ -1080,8 +1011,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX8-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX8-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_ssubsat_i64:
@@ -1094,8 +1024,7 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
-; GFX9-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_ssubsat_i64:
@@ -1104,12 +1033,11 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX10-NEXT: v_sub_co_u32 v4, vcc_lo, v0, v2
; GFX10-NEXT: v_sub_co_ci_u32_e32 v5, vcc_lo, v1, v3, vcc_lo
; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[2:3]
-; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX10-NEXT: v_xor_b32_e32 v1, 0x80000000, v6
+; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v6, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_ssubsat_i64:
@@ -1118,11 +1046,11 @@ define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
; GFX11-NEXT: v_sub_co_u32 v4, vcc_lo, v0, v2
; GFX11-NEXT: v_sub_co_ci_u32_e64 v5, null, v1, v3, vcc_lo
; GFX11-NEXT: v_cmp_lt_i64_e64 s0, 0, v[2:3]
-; GFX11-NEXT: v_ashrrev_i32_e32 v6, 31, v5
; GFX11-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[4:5], v[0:1]
-; GFX11-NEXT: v_xor_b32_e32 v1, 0x80000000, v6
+; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v5
; GFX11-NEXT: s_xor_b32 vcc_lo, s0, vcc_lo
-; GFX11-NEXT: v_dual_cndmask_b32 v0, v4, v6 :: v_dual_cndmask_b32 v1, v5, v1
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, -v1, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.ssub.sat.i64(i64 %lhs, i64 %rhs)
ret i64 %result
diff --git a/llvm/test/CodeGen/AVR/cmp.ll b/llvm/test/CodeGen/AVR/cmp.ll
index efc9b8d..c932bda1 100644
--- a/llvm/test/CodeGen/AVR/cmp.ll
+++ b/llvm/test/CodeGen/AVR/cmp.ll
@@ -298,3 +298,18 @@ define i16 @cmp_i16_gt_1023(i16 %0) {
%3 = zext i1 %2 to i16
ret i16 %3
}
+
+define void @cmp_issue152097(i16 %a) addrspace(1) {
+; See: https://github.com/llvm/llvm-project/issues/152097
+; CHECK-LABEL: cmp_issue152097
+; CHECK: ldi r18, -1
+; CHECK-NEXT: cpi r24, -2
+; CHECK-NEXT: cpc r25, r18
+; CHECK-NEXT: ret
+ %cmp = icmp ugt i16 -2, %a
+ br i1 %cmp, label %if.then, label %if.else
+if.then:
+ ret void
+if.else:
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll b/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll
new file mode 100644
index 0000000..25f81dd
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/Binding/binding-overlap-7.ll
@@ -0,0 +1,35 @@
+; Use llc for this test so that we don't abort after the first error.
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+; Check that an unbounded array does not overlap with a resource in a different register space
+
+ ; Buffer<double> A[2] : register(t2, space4);
+ ; Buffer<double> B : register(t20, space5); // does not overlap
+ ; Buffer<double> C[] : register(t2, space4); // overlaps with A
+
+; CHECK: error: resource A at register 2 overlaps with resource C at register 2 in space 4
+; CHECK-NOT: error: resource C at register 2 overlaps with resource B at register 20 in space 5
+
+target triple = "dxil-pc-shadermodel6.3-library"
+
+@A.str = private unnamed_addr constant [2 x i8] c"A\00", align 1
+@B.str = private unnamed_addr constant [2 x i8] c"B\00", align 1
+@C.str = private unnamed_addr constant [2 x i8] c"C\00", align 1
+
+define void @test_not_overlapping_in_different_spaces() {
+entry:
+
+ ; Buffer<double> A[2] : register(t2, space4);
+ %h0 = call target("dx.TypedBuffer", double, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 4, i32 2, i32 2, i32 10, i1 false, ptr @A.str)
+
+ ; Buffer<double> B : register(t20, space5);
+ %h1 = call target("dx.TypedBuffer", i64, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 5, i32 20, i32 1, i32 0, i1 false, ptr @B.str)
+
+ ; Buffer<double> C[] : register(t2, space4);
+ %h2 = call target("dx.TypedBuffer", double, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 4, i32 2, i32 -1, i32 10, i1 false, ptr @C.str)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/imad.ll b/llvm/test/CodeGen/DirectX/imad.ll
index 5d9463d..2e612f0 100644
--- a/llvm/test/CodeGen/DirectX/imad.ll
+++ b/llvm/test/CodeGen/DirectX/imad.ll
@@ -1,17 +1,13 @@
-; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+; RUN: opt -S -scalarizer -dxil-op-lower < %s | FileCheck %s
; Make sure dxil operation function calls for imad are generated for i16, i32, and i64.
-; CHECK:call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]]
-; CHECK:call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
-; CHECK:call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
-
-; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}}
target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
target triple = "dxil-pc-shadermodel6.7-library"
; Function Attrs: noinline nounwind optnone
define noundef i16 @imad_short(i16 noundef %p0, i16 noundef %p1, i16 noundef %p2) #0 {
entry:
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]]
%p2.addr = alloca i16, align 2
%p1.addr = alloca i16, align 2
%p0.addr = alloca i16, align 2
@@ -31,6 +27,7 @@ declare i16 @llvm.dx.imad.i16(i16, i16, i16) #1
; Function Attrs: noinline nounwind optnone
define noundef i32 @imad_int(i32 noundef %p0, i32 noundef %p1, i32 noundef %p2) #0 {
entry:
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
%p2.addr = alloca i32, align 4
%p1.addr = alloca i32, align 4
%p0.addr = alloca i32, align 4
@@ -50,6 +47,7 @@ declare i32 @llvm.dx.imad.i32(i32, i32, i32) #1
; Function Attrs: noinline nounwind optnone
define noundef i64 @imad_int64(i64 noundef %p0, i64 noundef %p1, i64 noundef %p2) #0 {
entry:
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
%p2.addr = alloca i64, align 8
%p1.addr = alloca i64, align 8
%p0.addr = alloca i64, align 8
@@ -65,3 +63,95 @@ entry:
; Function Attrs: nocallback nofree nosync nounwind willreturn
declare i64 @llvm.dx.imad.i64(i64, i64, i64) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i16> @imad_int16_t4(<4 x i16> noundef %p0, <4 x i16> noundef %p1, <4 x i16> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i16> %p0, i64 0
+ ; CHECK: extractelement <4 x i16> %p1, i64 0
+ ; CHECK: extractelement <4 x i16> %p2, i64 0
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 1
+ ; CHECK: extractelement <4 x i16> %p1, i64 1
+ ; CHECK: extractelement <4 x i16> %p2, i64 1
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 2
+ ; CHECK: extractelement <4 x i16> %p1, i64 2
+ ; CHECK: extractelement <4 x i16> %p2, i64 2
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 3
+ ; CHECK: extractelement <4 x i16> %p1, i64 3
+ ; CHECK: extractelement <4 x i16> %p2, i64 3
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 48, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i16> poison, i16 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 3
+ %dx.imad = call <4 x i16> @llvm.dx.imad.v4i16(<4 x i16> %p0, <4 x i16> %p1, <4 x i16> %p2)
+ ret <4 x i16> %dx.imad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i16> @llvm.dx.imad.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i32> @imad_int4(<4 x i32> noundef %p0, <4 x i32> noundef %p1, <4 x i32> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i32> %p0, i64 0
+ ; CHECK: extractelement <4 x i32> %p1, i64 0
+ ; CHECK: extractelement <4 x i32> %p2, i64 0
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 1
+ ; CHECK: extractelement <4 x i32> %p1, i64 1
+ ; CHECK: extractelement <4 x i32> %p2, i64 1
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 2
+ ; CHECK: extractelement <4 x i32> %p1, i64 2
+ ; CHECK: extractelement <4 x i32> %p2, i64 2
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 3
+ ; CHECK: extractelement <4 x i32> %p1, i64 3
+ ; CHECK: extractelement <4 x i32> %p2, i64 3
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 48, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 3
+ %dx.imad = call <4 x i32> @llvm.dx.imad.v4i32(<4 x i32> %p0, <4 x i32> %p1, <4 x i32> %p2)
+ ret <4 x i32> %dx.imad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i32> @llvm.dx.imad.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i64> @imad_int64_t4(<4 x i64> noundef %p0, <4 x i64> noundef %p1, <4 x i64> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i64> %p0, i64 0
+ ; CHECK: extractelement <4 x i64> %p1, i64 0
+ ; CHECK: extractelement <4 x i64> %p2, i64 0
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 1
+ ; CHECK: extractelement <4 x i64> %p1, i64 1
+ ; CHECK: extractelement <4 x i64> %p2, i64 1
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 2
+ ; CHECK: extractelement <4 x i64> %p1, i64 2
+ ; CHECK: extractelement <4 x i64> %p2, i64 2
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 3
+ ; CHECK: extractelement <4 x i64> %p1, i64 3
+ ; CHECK: extractelement <4 x i64> %p2, i64 3
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 48, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 3
+ %dx.imad = call <4 x i64> @llvm.dx.imad.v4i64(<4 x i64> %p0, <4 x i64> %p1, <4 x i64> %p2)
+ ret <4 x i64> %dx.imad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i64> @llvm.dx.imad.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) #1
+
+; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}}
diff --git a/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll b/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll
new file mode 100644
index 0000000..7c0813b
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/issue-140819_allow_forward_handle_on_alloca.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -dxil-forward-handle-accesses %s | FileCheck %s
+
+%"class.hlsl::RWStructuredBuffer" = type { target("dx.RawBuffer", i32, 1, 0) }
+@global = internal unnamed_addr global %"class.hlsl::RWStructuredBuffer" poison, align 4
+@name = private unnamed_addr constant [5 x i8] c"dest\00", align 1
+
+
+; NOTE: the intent of this test is to confirm that a load of target("dx.RawBuffer", i32, 1, 0)
+; is replaced with a call to @llvm.dx.resource.getpointer
+define void @CSMain() local_unnamed_addr {
+; CHECK-LABEL: define void @CSMain() local_unnamed_addr {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[AGG_TMP_I1_SROA_0:%.*]] = alloca target("dx.RawBuffer", i32, 1, 0), align 8
+; CHECK-NEXT: [[TMP0:%.*]] = tail call target("dx.RawBuffer", i32, 1, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_1_0t(i32 0, i32 3, i32 1, i32 0, i1 false, ptr nonnull @name)
+; CHECK-NEXT: store target("dx.RawBuffer", i32, 1, 0) [[TMP0]], ptr @global, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr @global, align 4
+; CHECK-NEXT: store i32 [[TMP2]], ptr [[AGG_TMP_I1_SROA_0]], align 8
+; CHECK-NEXT: [[TMP3:%.*]] = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_i32_1_0t(target("dx.RawBuffer", i32, 1, 0) [[TMP0]], i32 0)
+; CHECK-NEXT: store i32 0, ptr [[TMP3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %alloca = alloca target("dx.RawBuffer", i32, 1, 0), align 8
+ %handle = tail call target("dx.RawBuffer", i32, 1, 0) @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i32_1_0t(i32 0, i32 3, i32 1, i32 0, i1 false, ptr nonnull @name)
+ store target("dx.RawBuffer", i32, 1, 0) %handle , ptr @global, align 4
+ %val = load i32, ptr @global, align 4
+ store i32 %val , ptr %alloca, align 8
+ %indirect = load target("dx.RawBuffer", i32, 1, 0), ptr %alloca, align 8
+ %buff = tail call noundef nonnull align 4 dereferenceable(4) ptr @llvm.dx.resource.getpointer.p0.tdx.RawBuffer_i32_1_0t(target("dx.RawBuffer", i32, 1, 0) %indirect, i32 0)
+ store i32 0, ptr %buff, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/umad.ll b/llvm/test/CodeGen/DirectX/umad.ll
index 104d238..76516a2 100644
--- a/llvm/test/CodeGen/DirectX/umad.ll
+++ b/llvm/test/CodeGen/DirectX/umad.ll
@@ -1,17 +1,13 @@
-; RUN: opt -S -dxil-op-lower < %s | FileCheck %s
+; RUN: opt -S -scalarizer -dxil-op-lower < %s | FileCheck %s
; Make sure dxil operation function calls for umad are generated for i16, i32, and i64.
-; CHECK:call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]]
-; CHECK:call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
-; CHECK:call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
-
-; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}}
target datalayout = "e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:32-f64:64-n8:16:32:64"
target triple = "dxil-pc-shadermodel6.7-library"
; Function Attrs: noinline nounwind optnone
define noundef i16 @umad_ushort(i16 noundef %p0, i16 noundef %p1, i16 noundef %p2) #0 {
entry:
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR:]]
%p2.addr = alloca i16, align 2
%p1.addr = alloca i16, align 2
%p0.addr = alloca i16, align 2
@@ -31,6 +27,7 @@ declare i16 @llvm.dx.umad.i16(i16, i16, i16) #1
; Function Attrs: noinline nounwind optnone
define noundef i32 @umad_uint(i32 noundef %p0, i32 noundef %p1, i32 noundef %p2) #0 {
entry:
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
%p2.addr = alloca i32, align 4
%p1.addr = alloca i32, align 4
%p0.addr = alloca i32, align 4
@@ -50,6 +47,7 @@ declare i32 @llvm.dx.umad.i32(i32, i32, i32) #1
; Function Attrs: noinline nounwind optnone
define noundef i64 @umad_uint64(i64 noundef %p0, i64 noundef %p1, i64 noundef %p2) #0 {
entry:
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
%p2.addr = alloca i64, align 8
%p1.addr = alloca i64, align 8
%p0.addr = alloca i64, align 8
@@ -65,3 +63,95 @@ entry:
; Function Attrs: nocallback nofree nosync nounwind willreturn
declare i64 @llvm.dx.umad.i64(i64, i64, i64) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i16> @umad_uint16_t4(<4 x i16> noundef %p0, <4 x i16> noundef %p1, <4 x i16> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i16> %p0, i64 0
+ ; CHECK: extractelement <4 x i16> %p1, i64 0
+ ; CHECK: extractelement <4 x i16> %p2, i64 0
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 1
+ ; CHECK: extractelement <4 x i16> %p1, i64 1
+ ; CHECK: extractelement <4 x i16> %p2, i64 1
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 2
+ ; CHECK: extractelement <4 x i16> %p1, i64 2
+ ; CHECK: extractelement <4 x i16> %p2, i64 2
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i16> %p0, i64 3
+ ; CHECK: extractelement <4 x i16> %p1, i64 3
+ ; CHECK: extractelement <4 x i16> %p2, i64 3
+ ; CHECK: call i16 @dx.op.tertiary.i16(i32 49, i16 %{{.*}}, i16 %{{.*}}, i16 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i16> poison, i16 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i16> %{{.*}}, i16 %{{.*}}, i64 3
+ %dx.umad = call <4 x i16> @llvm.dx.umad.v4i16(<4 x i16> %p0, <4 x i16> %p1, <4 x i16> %p2)
+ ret <4 x i16> %dx.umad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i16> @llvm.dx.umad.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i32> @umad_uint4(<4 x i32> noundef %p0, <4 x i32> noundef %p1, <4 x i32> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i32> %p0, i64 0
+ ; CHECK: extractelement <4 x i32> %p1, i64 0
+ ; CHECK: extractelement <4 x i32> %p2, i64 0
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 1
+ ; CHECK: extractelement <4 x i32> %p1, i64 1
+ ; CHECK: extractelement <4 x i32> %p2, i64 1
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 2
+ ; CHECK: extractelement <4 x i32> %p1, i64 2
+ ; CHECK: extractelement <4 x i32> %p2, i64 2
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i32> %p0, i64 3
+ ; CHECK: extractelement <4 x i32> %p1, i64 3
+ ; CHECK: extractelement <4 x i32> %p2, i64 3
+ ; CHECK: call i32 @dx.op.tertiary.i32(i32 49, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i32> poison, i32 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i32> %{{.*}}, i32 %{{.*}}, i64 3
+ %dx.umad = call <4 x i32> @llvm.dx.umad.v4i32(<4 x i32> %p0, <4 x i32> %p1, <4 x i32> %p2)
+ ret <4 x i32> %dx.umad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i32> @llvm.dx.umad.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #1
+
+; Function Attrs: noinline nounwind optnone
+define noundef <4 x i64> @umad_uint64_t4(<4 x i64> noundef %p0, <4 x i64> noundef %p1, <4 x i64> noundef %p2) #0 {
+entry:
+ ; CHECK: extractelement <4 x i64> %p0, i64 0
+ ; CHECK: extractelement <4 x i64> %p1, i64 0
+ ; CHECK: extractelement <4 x i64> %p2, i64 0
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 1
+ ; CHECK: extractelement <4 x i64> %p1, i64 1
+ ; CHECK: extractelement <4 x i64> %p2, i64 1
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 2
+ ; CHECK: extractelement <4 x i64> %p1, i64 2
+ ; CHECK: extractelement <4 x i64> %p2, i64 2
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: extractelement <4 x i64> %p0, i64 3
+ ; CHECK: extractelement <4 x i64> %p1, i64 3
+ ; CHECK: extractelement <4 x i64> %p2, i64 3
+ ; CHECK: call i64 @dx.op.tertiary.i64(i32 49, i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) #[[#ATTR]]
+ ; CHECK: insertelement <4 x i64> poison, i64 %{{.*}}, i64 0
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 1
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 2
+ ; CHECK: insertelement <4 x i64> %{{.*}}, i64 %{{.*}}, i64 3
+ %dx.umad = call <4 x i64> @llvm.dx.umad.v4i64(<4 x i64> %p0, <4 x i64> %p1, <4 x i64> %p2)
+ ret <4 x i64> %dx.umad
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare <4 x i64> @llvm.dx.umad.v4i64(<4 x i64>, <4 x i64>, <4 x i64>) #1
+
+; CHECK: attributes #[[#ATTR]] = {{{.*}} memory(none) {{.*}}}
diff --git a/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
new file mode 100644
index 0000000..3efe9be
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
@@ -0,0 +1,80 @@
+; RUN: opt < %s -S -passes=infer-address-spaces | FileCheck %s --check-prefix=INFER
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | FileCheck %s --check-prefix=PTX
+; RUN: %if ptxas-12.3 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 -mattr=+ptx80 | %ptxas-verify -arch=sm_90 %}
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
+target triple = "nvptx64-unknown-unknown"
+
+@constant_tensormap = addrspace(4) global [64 x i8] zeroinitializer, align 64
+
+; Inference from const address space
+define void @test_infer_const_from_cast() {
+; INFER-LABEL: @test_infer_const_from_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .func test_infer_const_from_cast(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %casted = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %casted)
+ ret void
+}
+
+; Cast from Const space to Generic
+define void @test_const_to_generic_cast(ptr addrspace(4) %const_ptr) {
+; INFER-LABEL: @test_const_to_generic_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+; PTX-LABEL: .visible .func test_const_to_generic_cast(
+; PTX: prefetch.const.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(4) %const_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; No inference possible
+define void @test_no_inference_possible(ptr %generic_ptr) {
+; INFER-LABEL: @test_no_inference_possible
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p0(ptr %generic_ptr)
+; PTX-LABEL: .visible .func test_no_inference_possible(
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %generic_ptr)
+ ret void
+}
+
+; Cast from Parameter space to Generic
+define void @test_param_to_generic_cast(ptr addrspace(101) %param_ptr) {
+; INFER-LABEL: @test_param_to_generic_cast
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+; PTX-LABEL: .visible .func test_param_to_generic_cast(
+; PTX: prefetch.param.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(101) %param_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; Multiple casts in sequence
+define void @test_infer_through_multiple_casts() {
+; INFER-LABEL: @test_infer_through_multiple_casts
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .func test_infer_through_multiple_casts(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast1 = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ %cast2 = addrspacecast ptr %cast1 to ptr addrspace(4)
+ %cast3 = addrspacecast ptr addrspace(4) %cast2 to ptr
+  call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast3)
+ ret void
+}
+
+declare void @llvm.nvvm.prefetch.tensormap.p0(ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4))
+declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101))
+
+
diff --git a/llvm/test/CodeGen/NVPTX/prefetch.ll b/llvm/test/CodeGen/NVPTX/prefetch.ll
index a64e4fe..862e26d 100644
--- a/llvm/test/CodeGen/NVPTX/prefetch.ll
+++ b/llvm/test/CodeGen/NVPTX/prefetch.ll
@@ -12,6 +12,10 @@ declare void @llvm.nvvm.prefetch.local.L2(ptr addrspace(5) %local_ptr)
declare void @llvm.nvvm.prefetch.L1(ptr %ptr)
declare void @llvm.nvvm.prefetch.L2(ptr %ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+
declare void @llvm.nvvm.prefetch.global.L2.evict.normal(ptr addrspace(1) %global_ptr)
declare void @llvm.nvvm.prefetch.global.L2.evict.last(ptr addrspace(1) %global_ptr)
@@ -78,4 +82,43 @@ define void @prefetchu_l1(ptr %ptr) {
; CHECK-PTX64-NEXT: ret;
tail call void @llvm.nvvm.prefetchu.L1(ptr %ptr)
ret void
+}
+
+define void @prefetch_tensormap(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetch_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+ ret void
+}
+
+define void @prefetch_const_tensormap(ptr addrspace(4) %const_ptr) {
+; CHECK-PTX64-LABEL: prefetch_const_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_const_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.const.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) %const_ptr)
+ ret void
+}
+
+define void @prefetch_param_tensormap(ptr addrspace(101) %param_ptr) {
+; CHECK-PTX64-LABEL: prefetch_param_tensormap(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_param_tensormap_param_0];
+; CHECK-PTX64-NEXT: prefetch.param.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+ ret void
}
\ No newline at end of file
diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
index 92cb51b..94c2637 100644
--- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
@@ -2,19 +2,18 @@
; RUN: llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \
; RUN: -disable-post-ra -verify-machineinstrs \
; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM80 %s
-; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \
+; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_80 -mattr=+ptx70 -O0 \
; RUN: -disable-post-ra -verify-machineinstrs \
; RUN: | %ptxas-verify -arch=sm_80 %}
-; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \
+; RUN: llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \
; RUN: -disable-post-ra -verify-machineinstrs \
; RUN: | FileCheck -check-prefixes CHECK,CHECK-SM100 %s
-; RUN: %if ptxas-12.8 %{ llc < %s -mcpu=sm_100 -mattr=+ptx87 -O0 \
+; RUN: %if ptxas-12.9 %{ llc < %s -mcpu=sm_100 -mattr=+ptx88 -O0 \
; RUN: -disable-post-ra -verify-machineinstrs \
; RUN: | %ptxas-verify -arch=sm_100 %}
target triple = "nvptx64-nvidia-cuda"
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
-; Check straight line reduction.
define half @reduce_fadd_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fadd_half(
; CHECK: {
@@ -43,45 +42,22 @@ define half @reduce_fadd_half(<8 x half> %in) {
}
define half @reduce_fadd_half_reassoc(<8 x half> %in) {
-; CHECK-SM80-LABEL: reduce_fadd_half_reassoc(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<6>;
-; CHECK-SM80-NEXT: .reg .b32 %r<10>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
-; CHECK-SM80-NEXT: add.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: add.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: add.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: add.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: mov.b16 %rs4, 0x0000;
-; CHECK-SM80-NEXT: add.rn.f16 %rs5, %rs3, %rs4;
-; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs5;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_fadd_half_reassoc(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<6>;
-; CHECK-SM100-NEXT: .reg .b32 %r<10>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
-; CHECK-SM100-NEXT: add.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: add.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: add.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: add.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: mov.b16 %rs4, 0x0000;
-; CHECK-SM100-NEXT: add.rn.f16 %rs5, %rs3, %rs4;
-; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs5;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_fadd_half_reassoc(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<6>;
+; CHECK-NEXT: .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
+; CHECK-NEXT: add.rn.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT: add.rn.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT: add.rn.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: add.rn.f16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: mov.b16 %rs4, 0x0000;
+; CHECK-NEXT: add.rn.f16 %rs5, %rs3, %rs4;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs5;
+; CHECK-NEXT: ret;
%res = call reassoc half @llvm.vector.reduce.fadd(half 0.0, <8 x half> %in)
ret half %res
}
@@ -109,7 +85,6 @@ define half @reduce_fadd_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
define float @reduce_fadd_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fadd_float(
; CHECK: {
@@ -148,15 +123,15 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) {
; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fadd_float_reassoc_param_0];
; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4;
; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-SM80-NEXT: add.rn.f32 %r5, %r3, %r1;
+; CHECK-SM80-NEXT: add.rn.f32 %r5, %r4, %r2;
; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3;
; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-SM80-NEXT: add.rn.f32 %r10, %r8, %r6;
-; CHECK-SM80-NEXT: add.rn.f32 %r11, %r4, %r2;
-; CHECK-SM80-NEXT: add.rn.f32 %r12, %r9, %r7;
-; CHECK-SM80-NEXT: add.rn.f32 %r13, %r12, %r11;
-; CHECK-SM80-NEXT: add.rn.f32 %r14, %r10, %r5;
-; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r13;
+; CHECK-SM80-NEXT: add.rn.f32 %r10, %r9, %r7;
+; CHECK-SM80-NEXT: add.rn.f32 %r11, %r10, %r5;
+; CHECK-SM80-NEXT: add.rn.f32 %r12, %r3, %r1;
+; CHECK-SM80-NEXT: add.rn.f32 %r13, %r8, %r6;
+; CHECK-SM80-NEXT: add.rn.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: add.rn.f32 %r15, %r14, %r11;
; CHECK-SM80-NEXT: add.rn.f32 %r16, %r15, 0f00000000;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r16;
; CHECK-SM80-NEXT: ret;
@@ -164,7 +139,7 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) {
; CHECK-SM100-LABEL: reduce_fadd_float_reassoc(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b32 %r<5>;
-; CHECK-SM100-NEXT: .reg .b64 %rd<10>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fadd_float_reassoc_param_0+16];
@@ -172,11 +147,8 @@ define float @reduce_fadd_float_reassoc(<8 x float> %in) {
; CHECK-SM100-NEXT: add.rn.f32x2 %rd5, %rd2, %rd4;
; CHECK-SM100-NEXT: add.rn.f32x2 %rd6, %rd1, %rd3;
; CHECK-SM100-NEXT: add.rn.f32x2 %rd7, %rd6, %rd5;
-; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7;
-; CHECK-SM100-NEXT: // implicit-def: %r2
-; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2};
-; CHECK-SM100-NEXT: add.rn.f32x2 %rd9, %rd7, %rd8;
-; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9;
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7;
+; CHECK-SM100-NEXT: add.rn.f32 %r3, %r1, %r2;
; CHECK-SM100-NEXT: add.rn.f32 %r4, %r3, 0f00000000;
; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r4;
; CHECK-SM100-NEXT: ret;
@@ -229,7 +201,6 @@ define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) {
ret float %res
}
-; Check straight line reduction.
define half @reduce_fmul_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fmul_half(
; CHECK: {
@@ -256,41 +227,20 @@ define half @reduce_fmul_half(<8 x half> %in) {
}
define half @reduce_fmul_half_reassoc(<8 x half> %in) {
-; CHECK-SM80-LABEL: reduce_fmul_half_reassoc(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT: .reg .b32 %r<10>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
-; CHECK-SM80-NEXT: mul.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: mul.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: mul.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: mul.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: st.param.b16 [func_retval0], %rs3;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_fmul_half_reassoc(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<10>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
-; CHECK-SM100-NEXT: mul.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: mul.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: mul.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: mul.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: st.param.b16 [func_retval0], %rs3;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_fmul_half_reassoc(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
+; CHECK-NEXT: mul.rn.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT: mul.rn.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT: mul.rn.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: mul.rn.f16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs3;
+; CHECK-NEXT: ret;
%res = call reassoc half @llvm.vector.reduce.fmul(half 1.0, <8 x half> %in)
ret half %res
}
@@ -321,7 +271,6 @@ define half @reduce_fmul_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
define float @reduce_fmul_float(<8 x float> %in) {
; CHECK-LABEL: reduce_fmul_float(
; CHECK: {
@@ -359,22 +308,22 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) {
; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmul_float_reassoc_param_0];
; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd4;
; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r3, %r1;
+; CHECK-SM80-NEXT: mul.rn.f32 %r5, %r4, %r2;
; CHECK-SM80-NEXT: mov.b64 {%r6, %r7}, %rd3;
; CHECK-SM80-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r8, %r6;
-; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r4, %r2;
-; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r9, %r7;
-; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r12, %r11;
-; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r10, %r5;
-; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r13;
+; CHECK-SM80-NEXT: mul.rn.f32 %r10, %r9, %r7;
+; CHECK-SM80-NEXT: mul.rn.f32 %r11, %r10, %r5;
+; CHECK-SM80-NEXT: mul.rn.f32 %r12, %r3, %r1;
+; CHECK-SM80-NEXT: mul.rn.f32 %r13, %r8, %r6;
+; CHECK-SM80-NEXT: mul.rn.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: mul.rn.f32 %r15, %r14, %r11;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-SM80-NEXT: ret;
;
; CHECK-SM100-LABEL: reduce_fmul_float_reassoc(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b32 %r<4>;
-; CHECK-SM100-NEXT: .reg .b64 %rd<10>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<8>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmul_float_reassoc_param_0+16];
@@ -382,11 +331,8 @@ define float @reduce_fmul_float_reassoc(<8 x float> %in) {
; CHECK-SM100-NEXT: mul.rn.f32x2 %rd5, %rd2, %rd4;
; CHECK-SM100-NEXT: mul.rn.f32x2 %rd6, %rd1, %rd3;
; CHECK-SM100-NEXT: mul.rn.f32x2 %rd7, %rd6, %rd5;
-; CHECK-SM100-NEXT: mov.b64 {_, %r1}, %rd7;
-; CHECK-SM100-NEXT: // implicit-def: %r2
-; CHECK-SM100-NEXT: mov.b64 %rd8, {%r1, %r2};
-; CHECK-SM100-NEXT: mul.rn.f32x2 %rd9, %rd7, %rd8;
-; CHECK-SM100-NEXT: mov.b64 {%r3, _}, %rd9;
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd7;
+; CHECK-SM100-NEXT: mul.rn.f32 %r3, %r1, %r2;
; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r3;
; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in)
@@ -436,7 +382,6 @@ define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) {
ret float %res
}
-; Check straight line reduction.
define half @reduce_fmax_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fmax_half(
; CHECK: {
@@ -501,84 +446,256 @@ define half @reduce_fmax_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
-define float @reduce_fmax_float(<8 x float> %in) {
-;
-; CHECK-LABEL: reduce_fmax_float(
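+; Check that an nnan fmax reduction uses a packed max.f16x2 tree with a final scalar max.f16.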
+define half @reduce_fmax_half_nnan(<8 x half> %in) {
+; CHECK-LABEL: reduce_fmax_half_nnan(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: max.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: max.f32 %r10, %r9, %r7;
-; CHECK-NEXT: max.f32 %r11, %r10, %r5;
-; CHECK-NEXT: max.f32 %r12, %r3, %r1;
-; CHECK-NEXT: max.f32 %r13, %r8, %r6;
-; CHECK-NEXT: max.f32 %r14, %r13, %r12;
-; CHECK-NEXT: max.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_half_nnan_param_0];
+; CHECK-NEXT: max.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT: max.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT: max.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: max.f16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs3;
; CHECK-NEXT: ret;
- %res = call float @llvm.vector.reduce.fmax(<8 x float> %in)
- ret float %res
+ %res = call nnan half @llvm.vector.reduce.fmax(<8 x half> %in)
+ ret half %res
}
-define float @reduce_fmax_float_reassoc(<8 x float> %in) {
-;
-; CHECK-LABEL: reduce_fmax_float_reassoc(
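+; Check that the odd <7 x half> case pads the vector with -inf (0xFC00), the fmax identity.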
+define half @reduce_fmax_half_nnan_nonpow2(<7 x half> %in) {
+; CHECK-LABEL: reduce_fmax_half_nnan_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b16 %rs<12>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: max.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: max.f32 %r10, %r9, %r7;
-; CHECK-NEXT: max.f32 %r11, %r10, %r5;
-; CHECK-NEXT: max.f32 %r12, %r3, %r1;
-; CHECK-NEXT: max.f32 %r13, %r8, %r6;
-; CHECK-NEXT: max.f32 %r14, %r13, %r12;
-; CHECK-NEXT: max.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmax_half_nnan_nonpow2_param_0+8];
+; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1;
+; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmax_half_nnan_nonpow2_param_0];
+; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmax_half_nnan_nonpow2_param_0+12];
+; CHECK-NEXT: max.f16x2 %r4, %r2, %r1;
+; CHECK-NEXT: mov.b16 %rs8, 0xFC00;
+; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8};
+; CHECK-NEXT: max.f16x2 %r6, %r3, %r5;
+; CHECK-NEXT: max.f16x2 %r7, %r4, %r6;
+; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7;
+; CHECK-NEXT: max.f16 %rs11, %rs9, %rs10;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs11;
; CHECK-NEXT: ret;
+ %res = call nnan half @llvm.vector.reduce.fmax(<7 x half> %in)
+ ret half %res
+}
+
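+; Check both targets: sm_80 emits two-input max.f32, while sm_100 folds the
+; reduction tree into three-input max.f32 instructions.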
+define float @reduce_fmax_float(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmax_float(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
+ %res = call float @llvm.vector.reduce.fmax(<8 x float> %in)
+ ret float %res
+}
+
+define float @reduce_fmax_float_reassoc(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmax_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmax(<8 x float> %in)
ret float %res
}
define float @reduce_fmax_float_reassoc_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmax_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: max.f32 %r8, %r3, %r7;
-; CHECK-NEXT: max.f32 %r9, %r1, %r5;
-; CHECK-NEXT: max.f32 %r10, %r9, %r8;
-; CHECK-NEXT: max.f32 %r11, %r2, %r6;
-; CHECK-NEXT: max.f32 %r12, %r11, %r4;
-; CHECK-NEXT: max.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmax_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmax(<7 x float> %in)
ret float %res
}
-; Check straight line reduction.
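+; Check that nnan fmax on float lowers identically to the non-nnan case.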
+define float @reduce_fmax_float_nnan(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float_nnan(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmax_float_nnan(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmax_float_nnan_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmax_float_nnan_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
+ %res = call nnan float @llvm.vector.reduce.fmax(<8 x float> %in)
+ ret float %res
+}
+
+define float @reduce_fmax_float_nnan_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmax_float_nnan_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0];
+; CHECK-SM80-NEXT: max.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: max.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: max.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: max.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: max.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: max.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmax_float_nnan_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmax_float_nnan_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmax_float_nnan_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_nnan_nonpow2_param_0];
+; CHECK-SM100-NEXT: max.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: max.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: max.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
+ %res = call nnan float @llvm.vector.reduce.fmax(<7 x float> %in)
+ ret float %res
+}
+
define half @reduce_fmin_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fmin_half(
; CHECK: {
@@ -643,84 +760,256 @@ define half @reduce_fmin_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
-define float @reduce_fmin_float(<8 x float> %in) {
-;
-; CHECK-LABEL: reduce_fmin_float(
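+; Check that an nnan fmin reduction uses a packed min.f16x2 tree with a final scalar min.f16.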
+define half @reduce_fmin_half_nnan(<8 x half> %in) {
+; CHECK-LABEL: reduce_fmin_half_nnan(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: min.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: min.f32 %r10, %r9, %r7;
-; CHECK-NEXT: min.f32 %r11, %r10, %r5;
-; CHECK-NEXT: min.f32 %r12, %r3, %r1;
-; CHECK-NEXT: min.f32 %r13, %r8, %r6;
-; CHECK-NEXT: min.f32 %r14, %r13, %r12;
-; CHECK-NEXT: min.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_half_nnan_param_0];
+; CHECK-NEXT: min.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT: min.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT: min.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: min.f16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs3;
; CHECK-NEXT: ret;
- %res = call float @llvm.vector.reduce.fmin(<8 x float> %in)
- ret float %res
+ %res = call nnan half @llvm.vector.reduce.fmin(<8 x half> %in)
+ ret half %res
}
-define float @reduce_fmin_float_reassoc(<8 x float> %in) {
-;
-; CHECK-LABEL: reduce_fmin_float_reassoc(
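+; Check that the odd <7 x half> case pads the vector with +inf (0x7C00), the fmin identity.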
+define half @reduce_fmin_half_nnan_nonpow2(<7 x half> %in) {
+; CHECK-LABEL: reduce_fmin_half_nnan_nonpow2(
; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
+; CHECK-NEXT: .reg .b16 %rs<12>;
+; CHECK-NEXT: .reg .b32 %r<8>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: min.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: min.f32 %r10, %r9, %r7;
-; CHECK-NEXT: min.f32 %r11, %r10, %r5;
-; CHECK-NEXT: min.f32 %r12, %r3, %r1;
-; CHECK-NEXT: min.f32 %r13, %r8, %r6;
-; CHECK-NEXT: min.f32 %r14, %r13, %r12;
-; CHECK-NEXT: min.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-NEXT: ld.param.b32 %r1, [reduce_fmin_half_nnan_nonpow2_param_0+8];
+; CHECK-NEXT: mov.b32 {%rs5, %rs6}, %r1;
+; CHECK-NEXT: ld.param.v2.b32 {%r2, %r3}, [reduce_fmin_half_nnan_nonpow2_param_0];
+; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-NEXT: ld.param.b16 %rs7, [reduce_fmin_half_nnan_nonpow2_param_0+12];
+; CHECK-NEXT: min.f16x2 %r4, %r2, %r1;
+; CHECK-NEXT: mov.b16 %rs8, 0x7C00;
+; CHECK-NEXT: mov.b32 %r5, {%rs7, %rs8};
+; CHECK-NEXT: min.f16x2 %r6, %r3, %r5;
+; CHECK-NEXT: min.f16x2 %r7, %r4, %r6;
+; CHECK-NEXT: mov.b32 {%rs9, %rs10}, %r7;
+; CHECK-NEXT: min.f16 %rs11, %rs9, %rs10;
+; CHECK-NEXT: st.param.b16 [func_retval0], %rs11;
; CHECK-NEXT: ret;
+ %res = call nnan half @llvm.vector.reduce.fmin(<7 x half> %in)
+ ret half %res
+}
+
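+; Check both targets: sm_80 emits two-input min.f32, while sm_100 folds the
+; reduction tree into three-input min.f32 instructions.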
+define float @reduce_fmin_float(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmin_float(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
+ %res = call float @llvm.vector.reduce.fmin(<8 x float> %in)
+ ret float %res
+}
+
+define float @reduce_fmin_float_reassoc(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmin_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmin(<8 x float> %in)
ret float %res
}
define float @reduce_fmin_float_reassoc_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmin_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: min.f32 %r8, %r3, %r7;
-; CHECK-NEXT: min.f32 %r9, %r1, %r5;
-; CHECK-NEXT: min.f32 %r10, %r9, %r8;
-; CHECK-NEXT: min.f32 %r11, %r2, %r6;
-; CHECK-NEXT: min.f32 %r12, %r11, %r4;
-; CHECK-NEXT: min.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmin_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmin(<7 x float> %in)
ret float %res
}
-; Check straight-line reduction.
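+; Check that nnan fmin on float lowers identically to the non-nnan case.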
+define float @reduce_fmin_float_nnan(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float_nnan(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmin_float_nnan(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmin_float_nnan_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmin_float_nnan_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
+ %res = call nnan float @llvm.vector.reduce.fmin(<8 x float> %in)
+ ret float %res
+}
+
+define float @reduce_fmin_float_nnan_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmin_float_nnan_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0];
+; CHECK-SM80-NEXT: min.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: min.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: min.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: min.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: min.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: min.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
+;
+; CHECK-SM100-LABEL: reduce_fmin_float_nnan_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmin_float_nnan_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmin_float_nnan_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_nnan_nonpow2_param_0];
+; CHECK-SM100-NEXT: min.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: min.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: min.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
+ %res = call nnan float @llvm.vector.reduce.fmin(<7 x float> %in)
+ ret float %res
+}
+
define half @reduce_fmaximum_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fmaximum_half(
; CHECK: {
@@ -785,84 +1074,131 @@ define half @reduce_fmaximum_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
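+; Check that fmaximum uses NaN-propagating max.NaN.f32; sm_100 again folds
+; three inputs per instruction.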
define float @reduce_fmaximum_float(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmaximum_float(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmaximum_float(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7;
-; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5;
-; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1;
-; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6;
-; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12;
-; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmaximum_float(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call float @llvm.vector.reduce.fmaximum(<8 x float> %in)
ret float %res
}
define float @reduce_fmaximum_float_reassoc(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: max.NaN.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: max.NaN.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmaximum_float_reassoc(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: max.NaN.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r7;
-; CHECK-NEXT: max.NaN.f32 %r11, %r10, %r5;
-; CHECK-NEXT: max.NaN.f32 %r12, %r3, %r1;
-; CHECK-NEXT: max.NaN.f32 %r13, %r8, %r6;
-; CHECK-NEXT: max.NaN.f32 %r14, %r13, %r12;
-; CHECK-NEXT: max.NaN.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fmaximum_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fmaximum_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: max.NaN.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: max.NaN.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmaximum(<8 x float> %in)
ret float %res
}
define float @reduce_fmaximum_float_reassoc_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fmaximum_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: max.NaN.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: max.NaN.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: max.NaN.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: max.NaN.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: max.NaN.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: max.NaN.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fmaximum_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: max.NaN.f32 %r8, %r3, %r7;
-; CHECK-NEXT: max.NaN.f32 %r9, %r1, %r5;
-; CHECK-NEXT: max.NaN.f32 %r10, %r9, %r8;
-; CHECK-NEXT: max.NaN.f32 %r11, %r2, %r6;
-; CHECK-NEXT: max.NaN.f32 %r12, %r11, %r4;
-; CHECK-NEXT: max.NaN.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fmaximum_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fmaximum_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fmaximum_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: max.NaN.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: max.NaN.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: max.NaN.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fmaximum(<7 x float> %in)
ret float %res
}
-; Check straight-line reduction.
define half @reduce_fminimum_half(<8 x half> %in) {
; CHECK-LABEL: reduce_fminimum_half(
; CHECK: {
@@ -927,79 +1263,127 @@ define half @reduce_fminimum_half_reassoc_nonpow2(<7 x half> %in) {
ret half %res
}
-; Check straight-line reduction.
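+; Check that fminimum uses NaN-propagating min.NaN.f32; sm_100 again folds
+; three inputs per instruction.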
define float @reduce_fminimum_float(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fminimum_float(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fminimum_float(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7;
-; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5;
-; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1;
-; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6;
-; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12;
-; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fminimum_float(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call float @llvm.vector.reduce.fminimum(<8 x float> %in)
ret float %res
}
define float @reduce_fminimum_float_reassoc(<8 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<16>;
+; CHECK-SM80-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0];
+; CHECK-SM80-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-SM80-NEXT: mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM80-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM80-NEXT: mov.b64 {%r7, %r8}, %rd4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r7, %r8;
+; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r5, %r6;
+; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r3, %r4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r1, %r2;
+; CHECK-SM80-NEXT: min.NaN.f32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT: min.NaN.f32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fminimum_float_reassoc(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<16>;
-; CHECK-NEXT: .reg .b64 %rd<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16];
-; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0];
-; CHECK-NEXT: mov.b64 {%r1, %r2}, %rd4;
-; CHECK-NEXT: mov.b64 {%r3, %r4}, %rd2;
-; CHECK-NEXT: min.NaN.f32 %r5, %r4, %r2;
-; CHECK-NEXT: mov.b64 {%r6, %r7}, %rd3;
-; CHECK-NEXT: mov.b64 {%r8, %r9}, %rd1;
-; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r7;
-; CHECK-NEXT: min.NaN.f32 %r11, %r10, %r5;
-; CHECK-NEXT: min.NaN.f32 %r12, %r3, %r1;
-; CHECK-NEXT: min.NaN.f32 %r13, %r8, %r6;
-; CHECK-NEXT: min.NaN.f32 %r14, %r13, %r12;
-; CHECK-NEXT: min.NaN.f32 %r15, %r14, %r11;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<13>;
+; CHECK-SM100-NEXT: .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd3, %rd4}, [reduce_fminimum_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [reduce_fminimum_float_reassoc_param_0];
+; CHECK-SM100-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-SM100-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-SM100-NEXT: mov.b64 {%r7, %r8}, %rd2;
+; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r8, %r5, %r6;
+; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r3, %r4, %r7;
+; CHECK-SM100-NEXT: min.NaN.f32 %r11, %r10, %r9, %r1;
+; CHECK-SM100-NEXT: min.NaN.f32 %r12, %r11, %r2;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fminimum(<8 x float> %in)
ret float %res
}
define float @reduce_fminimum_float_reassoc_nonpow2(<7 x float> %in) {
+; CHECK-SM80-LABEL: reduce_fminimum_float_reassoc_nonpow2(
+; CHECK-SM80: {
+; CHECK-SM80-NEXT: .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT: // %bb.0:
+; CHECK-SM80-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT: min.NaN.f32 %r8, %r5, %r6;
+; CHECK-SM80-NEXT: min.NaN.f32 %r9, %r8, %r7;
+; CHECK-SM80-NEXT: min.NaN.f32 %r10, %r3, %r4;
+; CHECK-SM80-NEXT: min.NaN.f32 %r11, %r1, %r2;
+; CHECK-SM80-NEXT: min.NaN.f32 %r12, %r11, %r10;
+; CHECK-SM80-NEXT: min.NaN.f32 %r13, %r12, %r9;
+; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT: ret;
;
-; CHECK-LABEL: reduce_fminimum_float_reassoc_nonpow2(
-; CHECK: {
-; CHECK-NEXT: .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT: min.NaN.f32 %r8, %r3, %r7;
-; CHECK-NEXT: min.NaN.f32 %r9, %r1, %r5;
-; CHECK-NEXT: min.NaN.f32 %r10, %r9, %r8;
-; CHECK-NEXT: min.NaN.f32 %r11, %r2, %r6;
-; CHECK-NEXT: min.NaN.f32 %r12, %r11, %r4;
-; CHECK-NEXT: min.NaN.f32 %r13, %r10, %r12;
-; CHECK-NEXT: st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT: ret;
+; CHECK-SM100-LABEL: reduce_fminimum_float_reassoc_nonpow2(
+; CHECK-SM100: {
+; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT: // %bb.0:
+; CHECK-SM100-NEXT: ld.param.b32 %r7, [reduce_fminimum_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT: ld.param.v2.b32 {%r5, %r6}, [reduce_fminimum_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT: min.NaN.f32 %r8, %r4, %r5, %r6;
+; CHECK-SM100-NEXT: min.NaN.f32 %r9, %r1, %r2, %r3;
+; CHECK-SM100-NEXT: min.NaN.f32 %r10, %r9, %r8, %r7;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: ret;
%res = call reassoc float @llvm.vector.reduce.fminimum(<7 x float> %in)
ret float %res
}
@@ -1014,15 +1398,15 @@ define i16 @reduce_add_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: add.s16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: add.s16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: add.s16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: add.s16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: add.s16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: add.s16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: add.s16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: add.s16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: add.s16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: add.s16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: add.s16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: add.s16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: add.s16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1030,20 +1414,17 @@ define i16 @reduce_add_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_add_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i16_param_0];
; CHECK-SM100-NEXT: add.s16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: add.s16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: add.s16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: add.s16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: add.s16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.add(<8 x i16> %in)
ret i16 %res
@@ -1103,13 +1484,13 @@ define i32 @reduce_add_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0];
-; CHECK-NEXT: add.s32 %r9, %r3, %r7;
-; CHECK-NEXT: add.s32 %r10, %r1, %r5;
-; CHECK-NEXT: add.s32 %r11, %r4, %r8;
-; CHECK-NEXT: add.s32 %r12, %r2, %r6;
-; CHECK-NEXT: add.s32 %r13, %r12, %r11;
-; CHECK-NEXT: add.s32 %r14, %r10, %r9;
-; CHECK-NEXT: add.s32 %r15, %r14, %r13;
+; CHECK-NEXT: add.s32 %r9, %r4, %r8;
+; CHECK-NEXT: add.s32 %r10, %r2, %r6;
+; CHECK-NEXT: add.s32 %r11, %r10, %r9;
+; CHECK-NEXT: add.s32 %r12, %r3, %r7;
+; CHECK-NEXT: add.s32 %r13, %r1, %r5;
+; CHECK-NEXT: add.s32 %r14, %r13, %r12;
+; CHECK-NEXT: add.s32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.add(<8 x i32> %in)
@@ -1147,15 +1528,15 @@ define i16 @reduce_mul_i16(<8 x i16> %in) {
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i16_param_0];
; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-NEXT: mul.lo.s16 %rs5, %rs3, %rs1;
+; CHECK-NEXT: mul.lo.s16 %rs5, %rs4, %rs2;
; CHECK-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-NEXT: mul.lo.s16 %rs10, %rs8, %rs6;
-; CHECK-NEXT: mul.lo.s16 %rs11, %rs4, %rs2;
-; CHECK-NEXT: mul.lo.s16 %rs12, %rs9, %rs7;
-; CHECK-NEXT: mul.lo.s16 %rs13, %rs12, %rs11;
-; CHECK-NEXT: mul.lo.s16 %rs14, %rs10, %rs5;
-; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs13;
+; CHECK-NEXT: mul.lo.s16 %rs10, %rs9, %rs7;
+; CHECK-NEXT: mul.lo.s16 %rs11, %rs10, %rs5;
+; CHECK-NEXT: mul.lo.s16 %rs12, %rs3, %rs1;
+; CHECK-NEXT: mul.lo.s16 %rs13, %rs8, %rs6;
+; CHECK-NEXT: mul.lo.s16 %rs14, %rs13, %rs12;
+; CHECK-NEXT: mul.lo.s16 %rs15, %rs14, %rs11;
; CHECK-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-NEXT: ret;
@@ -1194,13 +1575,13 @@ define i32 @reduce_mul_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0];
-; CHECK-NEXT: mul.lo.s32 %r9, %r3, %r7;
-; CHECK-NEXT: mul.lo.s32 %r10, %r1, %r5;
-; CHECK-NEXT: mul.lo.s32 %r11, %r4, %r8;
-; CHECK-NEXT: mul.lo.s32 %r12, %r2, %r6;
-; CHECK-NEXT: mul.lo.s32 %r13, %r12, %r11;
-; CHECK-NEXT: mul.lo.s32 %r14, %r10, %r9;
-; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r13;
+; CHECK-NEXT: mul.lo.s32 %r9, %r4, %r8;
+; CHECK-NEXT: mul.lo.s32 %r10, %r2, %r6;
+; CHECK-NEXT: mul.lo.s32 %r11, %r10, %r9;
+; CHECK-NEXT: mul.lo.s32 %r12, %r3, %r7;
+; CHECK-NEXT: mul.lo.s32 %r13, %r1, %r5;
+; CHECK-NEXT: mul.lo.s32 %r14, %r13, %r12;
+; CHECK-NEXT: mul.lo.s32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.mul(<8 x i32> %in)
@@ -1238,15 +1619,15 @@ define i16 @reduce_umax_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: max.u16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: max.u16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: max.u16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: max.u16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: max.u16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: max.u16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: max.u16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: max.u16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: max.u16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: max.u16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: max.u16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: max.u16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: max.u16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1254,20 +1635,17 @@ define i16 @reduce_umax_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_umax_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i16_param_0];
; CHECK-SM100-NEXT: max.u16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: max.u16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: max.u16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: max.u16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: max.u16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.umax(<8 x i16> %in)
ret i16 %res
@@ -1327,13 +1705,13 @@ define i32 @reduce_umax_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0];
-; CHECK-NEXT: max.u32 %r9, %r3, %r7;
-; CHECK-NEXT: max.u32 %r10, %r1, %r5;
-; CHECK-NEXT: max.u32 %r11, %r4, %r8;
-; CHECK-NEXT: max.u32 %r12, %r2, %r6;
-; CHECK-NEXT: max.u32 %r13, %r12, %r11;
-; CHECK-NEXT: max.u32 %r14, %r10, %r9;
-; CHECK-NEXT: max.u32 %r15, %r14, %r13;
+; CHECK-NEXT: max.u32 %r9, %r4, %r8;
+; CHECK-NEXT: max.u32 %r10, %r2, %r6;
+; CHECK-NEXT: max.u32 %r11, %r10, %r9;
+; CHECK-NEXT: max.u32 %r12, %r3, %r7;
+; CHECK-NEXT: max.u32 %r13, %r1, %r5;
+; CHECK-NEXT: max.u32 %r14, %r13, %r12;
+; CHECK-NEXT: max.u32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.umax(<8 x i32> %in)
@@ -1371,15 +1749,15 @@ define i16 @reduce_umin_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: min.u16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: min.u16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: min.u16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: min.u16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: min.u16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: min.u16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: min.u16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: min.u16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: min.u16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: min.u16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: min.u16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: min.u16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: min.u16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1387,20 +1765,17 @@ define i16 @reduce_umin_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_umin_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i16_param_0];
; CHECK-SM100-NEXT: min.u16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: min.u16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: min.u16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: min.u16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: min.u16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.umin(<8 x i16> %in)
ret i16 %res
@@ -1460,13 +1835,13 @@ define i32 @reduce_umin_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0];
-; CHECK-NEXT: min.u32 %r9, %r3, %r7;
-; CHECK-NEXT: min.u32 %r10, %r1, %r5;
-; CHECK-NEXT: min.u32 %r11, %r4, %r8;
-; CHECK-NEXT: min.u32 %r12, %r2, %r6;
-; CHECK-NEXT: min.u32 %r13, %r12, %r11;
-; CHECK-NEXT: min.u32 %r14, %r10, %r9;
-; CHECK-NEXT: min.u32 %r15, %r14, %r13;
+; CHECK-NEXT: min.u32 %r9, %r4, %r8;
+; CHECK-NEXT: min.u32 %r10, %r2, %r6;
+; CHECK-NEXT: min.u32 %r11, %r10, %r9;
+; CHECK-NEXT: min.u32 %r12, %r3, %r7;
+; CHECK-NEXT: min.u32 %r13, %r1, %r5;
+; CHECK-NEXT: min.u32 %r14, %r13, %r12;
+; CHECK-NEXT: min.u32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.umin(<8 x i32> %in)
@@ -1504,15 +1879,15 @@ define i16 @reduce_smax_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: max.s16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: max.s16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: max.s16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: max.s16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: max.s16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: max.s16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: max.s16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: max.s16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: max.s16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: max.s16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: max.s16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: max.s16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: max.s16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1520,20 +1895,17 @@ define i16 @reduce_smax_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_smax_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i16_param_0];
; CHECK-SM100-NEXT: max.s16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: max.s16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: max.s16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: max.s16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: max.s16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.smax(<8 x i16> %in)
ret i16 %res
@@ -1593,13 +1965,13 @@ define i32 @reduce_smax_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0];
-; CHECK-NEXT: max.s32 %r9, %r3, %r7;
-; CHECK-NEXT: max.s32 %r10, %r1, %r5;
-; CHECK-NEXT: max.s32 %r11, %r4, %r8;
-; CHECK-NEXT: max.s32 %r12, %r2, %r6;
-; CHECK-NEXT: max.s32 %r13, %r12, %r11;
-; CHECK-NEXT: max.s32 %r14, %r10, %r9;
-; CHECK-NEXT: max.s32 %r15, %r14, %r13;
+; CHECK-NEXT: max.s32 %r9, %r4, %r8;
+; CHECK-NEXT: max.s32 %r10, %r2, %r6;
+; CHECK-NEXT: max.s32 %r11, %r10, %r9;
+; CHECK-NEXT: max.s32 %r12, %r3, %r7;
+; CHECK-NEXT: max.s32 %r13, %r1, %r5;
+; CHECK-NEXT: max.s32 %r14, %r13, %r12;
+; CHECK-NEXT: max.s32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.smax(<8 x i32> %in)
@@ -1637,15 +2009,15 @@ define i16 @reduce_smin_i16(<8 x i16> %in) {
; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0];
; CHECK-SM80-NEXT: mov.b32 {%rs1, %rs2}, %r4;
; CHECK-SM80-NEXT: mov.b32 {%rs3, %rs4}, %r2;
-; CHECK-SM80-NEXT: min.s16 %rs5, %rs3, %rs1;
+; CHECK-SM80-NEXT: min.s16 %rs5, %rs4, %rs2;
; CHECK-SM80-NEXT: mov.b32 {%rs6, %rs7}, %r3;
; CHECK-SM80-NEXT: mov.b32 {%rs8, %rs9}, %r1;
-; CHECK-SM80-NEXT: min.s16 %rs10, %rs8, %rs6;
-; CHECK-SM80-NEXT: min.s16 %rs11, %rs4, %rs2;
-; CHECK-SM80-NEXT: min.s16 %rs12, %rs9, %rs7;
-; CHECK-SM80-NEXT: min.s16 %rs13, %rs12, %rs11;
-; CHECK-SM80-NEXT: min.s16 %rs14, %rs10, %rs5;
-; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs13;
+; CHECK-SM80-NEXT: min.s16 %rs10, %rs9, %rs7;
+; CHECK-SM80-NEXT: min.s16 %rs11, %rs10, %rs5;
+; CHECK-SM80-NEXT: min.s16 %rs12, %rs3, %rs1;
+; CHECK-SM80-NEXT: min.s16 %rs13, %rs8, %rs6;
+; CHECK-SM80-NEXT: min.s16 %rs14, %rs13, %rs12;
+; CHECK-SM80-NEXT: min.s16 %rs15, %rs14, %rs11;
; CHECK-SM80-NEXT: cvt.u32.u16 %r5, %rs15;
; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r5;
; CHECK-SM80-NEXT: ret;
@@ -1653,20 +2025,17 @@ define i16 @reduce_smin_i16(<8 x i16> %in) {
; CHECK-SM100-LABEL: reduce_smin_i16(
; CHECK-SM100: {
; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
+; CHECK-SM100-NEXT: .reg .b32 %r<9>;
; CHECK-SM100-EMPTY:
; CHECK-SM100-NEXT: // %bb.0:
; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i16_param_0];
; CHECK-SM100-NEXT: min.s16x2 %r5, %r2, %r4;
; CHECK-SM100-NEXT: min.s16x2 %r6, %r1, %r3;
; CHECK-SM100-NEXT: min.s16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: min.s16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
+; CHECK-SM100-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-SM100-NEXT: min.s16 %rs3, %rs1, %rs2;
+; CHECK-SM100-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r8;
; CHECK-SM100-NEXT: ret;
%res = call i16 @llvm.vector.reduce.smin(<8 x i16> %in)
ret i16 %res
@@ -1726,13 +2095,13 @@ define i32 @reduce_smin_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0];
-; CHECK-NEXT: min.s32 %r9, %r3, %r7;
-; CHECK-NEXT: min.s32 %r10, %r1, %r5;
-; CHECK-NEXT: min.s32 %r11, %r4, %r8;
-; CHECK-NEXT: min.s32 %r12, %r2, %r6;
-; CHECK-NEXT: min.s32 %r13, %r12, %r11;
-; CHECK-NEXT: min.s32 %r14, %r10, %r9;
-; CHECK-NEXT: min.s32 %r15, %r14, %r13;
+; CHECK-NEXT: min.s32 %r9, %r4, %r8;
+; CHECK-NEXT: min.s32 %r10, %r2, %r6;
+; CHECK-NEXT: min.s32 %r11, %r10, %r9;
+; CHECK-NEXT: min.s32 %r12, %r3, %r7;
+; CHECK-NEXT: min.s32 %r13, %r1, %r5;
+; CHECK-NEXT: min.s32 %r14, %r13, %r12;
+; CHECK-NEXT: min.s32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.smin(<8 x i32> %in)
@@ -1761,43 +2130,21 @@ define i32 @reduce_smin_i32_nonpow2(<7 x i32> %in) {
}

define i16 @reduce_and_i16(<8 x i16> %in) {
-; CHECK-SM80-LABEL: reduce_and_i16(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT: .reg .b32 %r<11>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0];
-; CHECK-SM80-NEXT: and.b32 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: and.b32 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: and.b32 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: and.b32 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_and_i16(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0];
-; CHECK-SM100-NEXT: and.b32 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: and.b32 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: and.b32 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: and.b32 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_and_i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i16_param_0];
+; CHECK-NEXT: and.b32 %r5, %r2, %r4;
+; CHECK-NEXT: and.b32 %r6, %r1, %r3;
+; CHECK-NEXT: and.b32 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: and.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r8;
+; CHECK-NEXT: ret;
%res = call i16 @llvm.vector.reduce.and(<8 x i16> %in)
ret i16 %res
}
@@ -1837,13 +2184,13 @@ define i32 @reduce_and_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0];
-; CHECK-NEXT: and.b32 %r9, %r3, %r7;
-; CHECK-NEXT: and.b32 %r10, %r1, %r5;
-; CHECK-NEXT: and.b32 %r11, %r4, %r8;
-; CHECK-NEXT: and.b32 %r12, %r2, %r6;
-; CHECK-NEXT: and.b32 %r13, %r12, %r11;
-; CHECK-NEXT: and.b32 %r14, %r10, %r9;
-; CHECK-NEXT: and.b32 %r15, %r14, %r13;
+; CHECK-NEXT: and.b32 %r9, %r4, %r8;
+; CHECK-NEXT: and.b32 %r10, %r2, %r6;
+; CHECK-NEXT: and.b32 %r11, %r10, %r9;
+; CHECK-NEXT: and.b32 %r12, %r3, %r7;
+; CHECK-NEXT: and.b32 %r13, %r1, %r5;
+; CHECK-NEXT: and.b32 %r14, %r13, %r12;
+; CHECK-NEXT: and.b32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.and(<8 x i32> %in)
@@ -1872,43 +2219,21 @@ define i32 @reduce_and_i32_nonpow2(<7 x i32> %in) {
}

define i16 @reduce_or_i16(<8 x i16> %in) {
-; CHECK-SM80-LABEL: reduce_or_i16(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT: .reg .b32 %r<11>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0];
-; CHECK-SM80-NEXT: or.b32 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: or.b32 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: or.b32 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: or.b32 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_or_i16(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0];
-; CHECK-SM100-NEXT: or.b32 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: or.b32 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: or.b32 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: or.b32 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_or_i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i16_param_0];
+; CHECK-NEXT: or.b32 %r5, %r2, %r4;
+; CHECK-NEXT: or.b32 %r6, %r1, %r3;
+; CHECK-NEXT: or.b32 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: or.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r8;
+; CHECK-NEXT: ret;
%res = call i16 @llvm.vector.reduce.or(<8 x i16> %in)
ret i16 %res
}
@@ -1948,13 +2273,13 @@ define i32 @reduce_or_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0];
-; CHECK-NEXT: or.b32 %r9, %r3, %r7;
-; CHECK-NEXT: or.b32 %r10, %r1, %r5;
-; CHECK-NEXT: or.b32 %r11, %r4, %r8;
-; CHECK-NEXT: or.b32 %r12, %r2, %r6;
-; CHECK-NEXT: or.b32 %r13, %r12, %r11;
-; CHECK-NEXT: or.b32 %r14, %r10, %r9;
-; CHECK-NEXT: or.b32 %r15, %r14, %r13;
+; CHECK-NEXT: or.b32 %r9, %r4, %r8;
+; CHECK-NEXT: or.b32 %r10, %r2, %r6;
+; CHECK-NEXT: or.b32 %r11, %r10, %r9;
+; CHECK-NEXT: or.b32 %r12, %r3, %r7;
+; CHECK-NEXT: or.b32 %r13, %r1, %r5;
+; CHECK-NEXT: or.b32 %r14, %r13, %r12;
+; CHECK-NEXT: or.b32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.or(<8 x i32> %in)
@@ -1983,43 +2308,21 @@ define i32 @reduce_or_i32_nonpow2(<7 x i32> %in) {
}

define i16 @reduce_xor_i16(<8 x i16> %in) {
-; CHECK-SM80-LABEL: reduce_xor_i16(
-; CHECK-SM80: {
-; CHECK-SM80-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT: .reg .b32 %r<11>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT: // %bb.0:
-; CHECK-SM80-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0];
-; CHECK-SM80-NEXT: xor.b32 %r5, %r2, %r4;
-; CHECK-SM80-NEXT: xor.b32 %r6, %r1, %r3;
-; CHECK-SM80-NEXT: xor.b32 %r7, %r6, %r5;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT: // implicit-def: %rs2
-; CHECK-SM80-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT: xor.b32 %r9, %r7, %r8;
-; CHECK-SM80-NEXT: { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM80-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM80-NEXT: ret;
-;
-; CHECK-SM100-LABEL: reduce_xor_i16(
-; CHECK-SM100: {
-; CHECK-SM100-NEXT: .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT: .reg .b32 %r<11>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT: // %bb.0:
-; CHECK-SM100-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0];
-; CHECK-SM100-NEXT: xor.b32 %r5, %r2, %r4;
-; CHECK-SM100-NEXT: xor.b32 %r6, %r1, %r3;
-; CHECK-SM100-NEXT: xor.b32 %r7, %r6, %r5;
-; CHECK-SM100-NEXT: mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT: // implicit-def: %rs2
-; CHECK-SM100-NEXT: mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT: xor.b32 %r9, %r7, %r8;
-; CHECK-SM100-NEXT: mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT: cvt.u32.u16 %r10, %rs3;
-; CHECK-SM100-NEXT: st.param.b32 [func_retval0], %r10;
-; CHECK-SM100-NEXT: ret;
+; CHECK-LABEL: reduce_xor_i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<4>;
+; CHECK-NEXT: .reg .b32 %r<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i16_param_0];
+; CHECK-NEXT: xor.b32 %r5, %r2, %r4;
+; CHECK-NEXT: xor.b32 %r6, %r1, %r3;
+; CHECK-NEXT: xor.b32 %r7, %r6, %r5;
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT: xor.b16 %rs3, %rs1, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs3;
+; CHECK-NEXT: st.param.b32 [func_retval0], %r8;
+; CHECK-NEXT: ret;
%res = call i16 @llvm.vector.reduce.xor(<8 x i16> %in)
ret i16 %res
}
@@ -2059,13 +2362,13 @@ define i32 @reduce_xor_i32(<8 x i32> %in) {
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16];
; CHECK-NEXT: ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0];
-; CHECK-NEXT: xor.b32 %r9, %r3, %r7;
-; CHECK-NEXT: xor.b32 %r10, %r1, %r5;
-; CHECK-NEXT: xor.b32 %r11, %r4, %r8;
-; CHECK-NEXT: xor.b32 %r12, %r2, %r6;
-; CHECK-NEXT: xor.b32 %r13, %r12, %r11;
-; CHECK-NEXT: xor.b32 %r14, %r10, %r9;
-; CHECK-NEXT: xor.b32 %r15, %r14, %r13;
+; CHECK-NEXT: xor.b32 %r9, %r4, %r8;
+; CHECK-NEXT: xor.b32 %r10, %r2, %r6;
+; CHECK-NEXT: xor.b32 %r11, %r10, %r9;
+; CHECK-NEXT: xor.b32 %r12, %r3, %r7;
+; CHECK-NEXT: xor.b32 %r13, %r1, %r5;
+; CHECK-NEXT: xor.b32 %r14, %r13, %r12;
+; CHECK-NEXT: xor.b32 %r15, %r14, %r11;
; CHECK-NEXT: st.param.b32 [func_retval0], %r15;
; CHECK-NEXT: ret;
%res = call i32 @llvm.vector.reduce.xor(<8 x i32> %in)
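In the NVPTX hunks above the reduction tree itself is unchanged: both the old and new CHECK lines compute ((e0+e4)+(e2+e6)) + ((e1+e5)+(e3+e7)); the scheduler now emits one half of the tree before the other, renumbering every virtual register. The SM100 i16 variants are the real improvement: instead of shuffling the high lane against an implicit-def register and issuing one more packed op, they unpack the final packed register and finish with a single scalar instruction, saving two b32 registers. A standalone sketch (assumed function name, not part of the patch) of that tree for the <8 x i32> add case, written in the new emission order, with %e0-%e3 coming from the param_0 load and %e4-%e7 from param_0+16:

; Illustrative IR only: the pairwise tree matching the new %r9..%r15 order.
define i32 @reduce_add_tree_sketch(<8 x i32> %v) {
  %e0 = extractelement <8 x i32> %v, i32 0
  %e1 = extractelement <8 x i32> %v, i32 1
  %e2 = extractelement <8 x i32> %v, i32 2
  %e3 = extractelement <8 x i32> %v, i32 3
  %e4 = extractelement <8 x i32> %v, i32 4
  %e5 = extractelement <8 x i32> %v, i32 5
  %e6 = extractelement <8 x i32> %v, i32 6
  %e7 = extractelement <8 x i32> %v, i32 7
  %p37 = add i32 %e3, %e7     ; %r9
  %p15 = add i32 %e1, %e5     ; %r10
  %odd = add i32 %p15, %p37   ; %r11
  %p26 = add i32 %e2, %e6     ; %r12
  %p04 = add i32 %e0, %e4     ; %r13
  %even = add i32 %p04, %p26  ; %r14
  %sum = add i32 %even, %odd  ; %r15
  ret i32 %sum
}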
diff --git a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
index 1863eaf..bfc7fbb 100644
--- a/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-nest-param.ll
@@ -1,5 +1,5 @@
-; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s

define ptr @nest_receiver(ptr nest %arg) nounwind {
ret ptr %arg
@@ -9,5 +9,10 @@ define ptr @nest_caller(ptr %arg) nounwind {
%result = call ptr @nest_receiver(ptr nest %arg)
ret ptr %result
}
+; CHECK-LABEL: .nest_receiver:
+; CHECK: mr 3, 11
+; CHECK: blr

-; CHECK: LLVM ERROR: Nest arguments are unimplemented.
+; CHECK-LABEL: .nest_caller:
+; CHECK: mr 11, 3
+; CHECK: bl .nest_receiver
diff --git a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
index b71f6b5..19df220 100644
--- a/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-trampoline.ll
@@ -1,7 +1,7 @@
-; RUN: not --crash llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-; RUN: not --crash llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 | FileCheck %s
-
-; CHECK: LLVM ERROR: INIT_TRAMPOLINE operation is not supported on AIX.
+; RUN: llc -mtriple powerpc-ibm-aix-xcoff < %s 2>&1 | \
+; RUN: FileCheck %s --check-prefix=32BIT
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff < %s 2>&1 -mattr=-altivec | \
+; RUN:   FileCheck %s --check-prefix=64BIT

define void @create_trampoline(ptr %buffer, ptr %nval) nounwind {
entry:
@@ -12,3 +12,17 @@ entry:
declare i32 @nested(i32);
declare void @llvm.init.trampoline(ptr, ptr, ptr) nounwind
+
+; 32BIT: stw 4, 8(3)
+; 32BIT: lwz [[FuncDesc:[0-9]+]], L..C0(2)
+; 32BIT-DAG: lwz [[SCRATCH1:[0-9]+]], 0([[FuncDesc]])
+; 32BIT-DAG: lwz [[SCRATCH2:[0-9]+]], 4([[FuncDesc]])
+; 32BIT-DAG: stw [[SCRATCH1]], 0(3)
+; 32BIT-DAG: stw [[SCRATCH2]], 4(3)
+
+; 64BIT: std 4, 16(3)
+; 64BIT-DAG: ld [[FuncDesc:[0-9]+]], L..C0(2)
+; 64BIT-DAG: ld [[SCRATCH1:[0-9]+]], 0([[FuncDesc]])
+; 64BIT-DAG: ld [[SCRATCH2:[0-9]+]], 8([[FuncDesc]])
+; 64BIT-DAG: std [[SCRATCH1]], 0(3)
+; 64BIT-DAG: std [[SCRATCH2]], 8(3)
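With INIT_TRAMPOLINE now supported on AIX, the DAG checks above pin down the lowering: the two words of @nested's function descriptor are copied from its TOC entry into the buffer (offsets 0 and 4 on 32-bit, 0 and 8 on 64-bit), and the nest value is stored just past them (offset 8, respectively 16). A minimal usage sketch of the trampoline intrinsics follows; the buffer size, alignment, and function names are illustrative assumptions, not taken from the test:

declare void @llvm.init.trampoline(ptr, ptr, ptr)
declare ptr @llvm.adjust.trampoline(ptr)
declare i32 @nested_fn(ptr nest, i32)

define i32 @call_nested(ptr %env, i32 %x) {
  %buf = alloca [40 x i8], align 8    ; assumed buffer size
  call void @llvm.init.trampoline(ptr %buf, ptr @nested_fn, ptr %env)
  %fn = call ptr @llvm.adjust.trampoline(ptr %buf)
  ; The trampoline supplies the nest argument, so the call omits it.
  %r = call i32 %fn(i32 %x)
  ret i32 %r
}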
diff --git a/llvm/test/CodeGen/PowerPC/memintr32.ll b/llvm/test/CodeGen/PowerPC/memintr32.ll
index c07a5af..4f0a996 100644
--- a/llvm/test/CodeGen/PowerPC/memintr32.ll
+++ b/llvm/test/CodeGen/PowerPC/memintr32.ll
@@ -11,7 +11,7 @@ define i32 @memcmp_test(ptr nocapture noundef readonly %ptr1, ptr nocapture noun
; CHECK-AIX-32-P9-NEXT: mflr r0
; CHECK-AIX-32-P9-NEXT: stwu r1, -64(r1)
; CHECK-AIX-32-P9-NEXT: stw r0, 72(r1)
-; CHECK-AIX-32-P9-NEXT: bl .memcmp[PR]
+; CHECK-AIX-32-P9-NEXT: bl .___memcmp[PR]
; CHECK-AIX-32-P9-NEXT: nop
; CHECK-AIX-32-P9-NEXT: addi r1, r1, 64
; CHECK-AIX-32-P9-NEXT: lwz r0, 8(r1)
diff --git a/llvm/test/CodeGen/PowerPC/memintr64.ll b/llvm/test/CodeGen/PowerPC/memintr64.ll
index b3a6650..0b0e556 100644
--- a/llvm/test/CodeGen/PowerPC/memintr64.ll
+++ b/llvm/test/CodeGen/PowerPC/memintr64.ll
@@ -39,7 +39,7 @@ define noundef i32 @_Z11memcmp_testPKvS0_m(ptr noundef readonly captures(none) %
; CHECK-AIX-64-P9-NEXT: mflr r0
; CHECK-AIX-64-P9-NEXT: stdu r1, -112(r1)
; CHECK-AIX-64-P9-NEXT: std r0, 128(r1)
-; CHECK-AIX-64-P9-NEXT: bl .memcmp[PR]
+; CHECK-AIX-64-P9-NEXT: bl .___memcmp64[PR]
; CHECK-AIX-64-P9-NEXT: nop
; CHECK-AIX-64-P9-NEXT: addi r1, r1, 112
; CHECK-AIX-64-P9-NEXT: ld r0, 16(r1)
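Both memintr tests now expect AIX-specific memcmp entry points, .___memcmp[PR] on 32-bit and .___memcmp64[PR] on 64-bit, in place of the plain .memcmp[PR] call. Reduced to a sketch, the IR shape being exercised looks like the following; the function name and constant length are illustrative assumptions:

declare i32 @memcmp(ptr, ptr, i64)

define i32 @cmp16_sketch(ptr %a, ptr %b) {
  ; On 64-bit AIX this call would now be expected to emit "bl .___memcmp64[PR]".
  %r = call i32 @memcmp(ptr %a, ptr %b, i64 16)
  ret i32 %r
}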
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index b94665b..fb53921 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -6,13 +6,21 @@
; CHECK-NEXT: 32bit - Implements RV32.
; CHECK-NEXT: 64bit - Implements RV64.
; CHECK-NEXT: a - 'A' (Atomic Instructions).
+; CHECK-NEXT: add-load-fusion - Enable ADD(.UW) + load macrofusion.
+; CHECK-NEXT: addi-load-fusion - Enable ADDI + load macrofusion.
; CHECK-NEXT: andes45 - Andes 45-Series processors.
; CHECK-NEXT: auipc-addi-fusion - Enable AUIPC+ADDI macrofusion.
+; CHECK-NEXT: auipc-load-fusion - Enable AUIPC + load macrofusion.
; CHECK-NEXT: b - 'B' (the collection of the Zba, Zbb, Zbs extensions).
+; CHECK-NEXT: bfext-fusion - Enable SLLI+SRLI (bitfield extract) macrofusion.
; CHECK-NEXT: c - 'C' (Compressed Instructions).
; CHECK-NEXT: conditional-cmv-fusion - Enable branch+c.mv fusion.
; CHECK-NEXT: d - 'D' (Double-Precision Floating-Point).
; CHECK-NEXT: disable-latency-sched-heuristic - Disable latency scheduling heuristic.
+; CHECK-NEXT: disable-misched-load-clustering - Disable load clustering in the machine scheduler.
+; CHECK-NEXT: disable-misched-store-clustering - Disable store clustering in the machine scheduler.
+; CHECK-NEXT: disable-postmisched-load-clustering - Disable PostRA load clustering in the machine scheduler.
+; CHECK-NEXT: disable-postmisched-store-clustering - Disable PostRA store clustering in the machine scheduler.
; CHECK-NEXT: dlen-factor-2 - Vector unit DLEN(data path width) is half of VLEN.
; CHECK-NEXT: e - 'E' (Embedded Instruction Set with 16 GPRs).
; CHECK-NEXT: exact-asm - Enable Exact Assembly (Disables Compression and Relaxation).
@@ -58,6 +66,7 @@
; CHECK-NEXT: ld-add-fusion - Enable LD+ADD macrofusion.
; CHECK-NEXT: log-vrgather - Has vrgather.vv with LMUL*log2(LMUL) latency
; CHECK-NEXT: lui-addi-fusion - Enable LUI+ADDI macro fusion.
+; CHECK-NEXT: lui-load-fusion - Enable LUI + load macrofusion.
; CHECK-NEXT: m - 'M' (Integer Multiplication and Division).
; CHECK-NEXT: mips-p8700 - MIPS p8700 processor.
; CHECK-NEXT: no-default-unroll - Disable default unroll preference..
@@ -130,6 +139,7 @@
; CHECK-NEXT: shvsatpa - 'Shvsatpa' (vsatp supports all modes supported by satp).
; CHECK-NEXT: shvstvala - 'Shvstvala' (vstval provides all needed values).
; CHECK-NEXT: shvstvecd - 'Shvstvecd' (vstvec supports Direct mode).
+; CHECK-NEXT: shxadd-load-fusion - Enable SH(1|2|3)ADD(.UW) + load macrofusion.
; CHECK-NEXT: sifive7 - SiFive 7-Series processors.
; CHECK-NEXT: smaia - 'Smaia' (Advanced Interrupt Architecture Machine Level).
; CHECK-NEXT: smcdeleg - 'Smcdeleg' (Counter Delegation Machine Level).
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index facb544..0c152e6 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -2262,12 +2262,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZHINX-NEXT: addi a2, a3, -1
; RV32IZHINX-NEXT: .LBB10_4: # %start
; RV32IZHINX-NEXT: feq.s a3, s0, s0
-; RV32IZHINX-NEXT: neg a4, a1
-; RV32IZHINX-NEXT: neg a1, s1
+; RV32IZHINX-NEXT: neg a4, s1
+; RV32IZHINX-NEXT: neg a5, a1
; RV32IZHINX-NEXT: neg a3, a3
-; RV32IZHINX-NEXT: and a0, a1, a0
+; RV32IZHINX-NEXT: and a0, a4, a0
; RV32IZHINX-NEXT: and a1, a3, a2
-; RV32IZHINX-NEXT: or a0, a4, a0
+; RV32IZHINX-NEXT: or a0, a5, a0
; RV32IZHINX-NEXT: and a0, a3, a0
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2309,12 +2309,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZDINXZHINX-NEXT: addi a2, a3, -1
; RV32IZDINXZHINX-NEXT: .LBB10_4: # %start
; RV32IZDINXZHINX-NEXT: feq.s a3, s0, s0
-; RV32IZDINXZHINX-NEXT: neg a4, a1
-; RV32IZDINXZHINX-NEXT: neg a1, s1
+; RV32IZDINXZHINX-NEXT: neg a4, s1
+; RV32IZDINXZHINX-NEXT: neg a5, a1
; RV32IZDINXZHINX-NEXT: neg a3, a3
-; RV32IZDINXZHINX-NEXT: and a0, a1, a0
+; RV32IZDINXZHINX-NEXT: and a0, a4, a0
; RV32IZDINXZHINX-NEXT: and a1, a3, a2
-; RV32IZDINXZHINX-NEXT: or a0, a4, a0
+; RV32IZDINXZHINX-NEXT: or a0, a5, a0
; RV32IZDINXZHINX-NEXT: and a0, a3, a0
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2653,12 +2653,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: addi a2, a3, -1
; CHECK32-IZHINXMIN-NEXT: .LBB10_4: # %start
; CHECK32-IZHINXMIN-NEXT: feq.s a3, s0, s0
-; CHECK32-IZHINXMIN-NEXT: neg a4, a1
-; CHECK32-IZHINXMIN-NEXT: neg a1, s1
+; CHECK32-IZHINXMIN-NEXT: neg a4, s1
+; CHECK32-IZHINXMIN-NEXT: neg a5, a1
; CHECK32-IZHINXMIN-NEXT: neg a3, a3
-; CHECK32-IZHINXMIN-NEXT: and a0, a1, a0
+; CHECK32-IZHINXMIN-NEXT: and a0, a4, a0
; CHECK32-IZHINXMIN-NEXT: and a1, a3, a2
-; CHECK32-IZHINXMIN-NEXT: or a0, a4, a0
+; CHECK32-IZHINXMIN-NEXT: or a0, a5, a0
; CHECK32-IZHINXMIN-NEXT: and a0, a3, a0
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2701,12 +2701,12 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN-NEXT: addi a2, a3, -1
; CHECK32-IZDINXZHINXMIN-NEXT: .LBB10_4: # %start
; CHECK32-IZDINXZHINXMIN-NEXT: feq.s a3, s0, s0
-; CHECK32-IZDINXZHINXMIN-NEXT: neg a4, a1
-; CHECK32-IZDINXZHINXMIN-NEXT: neg a1, s1
+; CHECK32-IZDINXZHINXMIN-NEXT: neg a4, s1
+; CHECK32-IZDINXZHINXMIN-NEXT: neg a5, a1
; CHECK32-IZDINXZHINXMIN-NEXT: neg a3, a3
-; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a1, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a4, a0
; CHECK32-IZDINXZHINXMIN-NEXT: and a1, a3, a2
-; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a4, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a5, a0
; CHECK32-IZDINXZHINXMIN-NEXT: and a0, a3, a0
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2972,18 +2972,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZHINX-NEXT: lui a1, 391168
-; RV32IZHINX-NEXT: addi a1, a1, -1
-; RV32IZHINX-NEXT: fle.s a2, zero, a0
-; RV32IZHINX-NEXT: flt.s a1, a1, a0
-; RV32IZHINX-NEXT: neg s0, a1
-; RV32IZHINX-NEXT: neg s1, a2
+; RV32IZHINX-NEXT: fcvt.s.h s0, a0
+; RV32IZHINX-NEXT: fle.s a0, zero, s0
+; RV32IZHINX-NEXT: neg s1, a0
+; RV32IZHINX-NEXT: mv a0, s0
; RV32IZHINX-NEXT: call __fixunssfdi
; RV32IZHINX-NEXT: and a0, s1, a0
+; RV32IZHINX-NEXT: lui a2, 391168
; RV32IZHINX-NEXT: and a1, s1, a1
-; RV32IZHINX-NEXT: or a0, s0, a0
-; RV32IZHINX-NEXT: or a1, s0, a1
+; RV32IZHINX-NEXT: addi a2, a2, -1
+; RV32IZHINX-NEXT: flt.s a2, a2, s0
+; RV32IZHINX-NEXT: neg a2, a2
+; RV32IZHINX-NEXT: or a0, a2, a0
+; RV32IZHINX-NEXT: or a1, a2, a1
; RV32IZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -3005,18 +3006,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IZDINXZHINX-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IZDINXZHINX-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZDINXZHINX-NEXT: fcvt.s.h a0, a0
-; RV32IZDINXZHINX-NEXT: lui a1, 391168
-; RV32IZDINXZHINX-NEXT: addi a1, a1, -1
-; RV32IZDINXZHINX-NEXT: fle.s a2, zero, a0
-; RV32IZDINXZHINX-NEXT: flt.s a1, a1, a0
-; RV32IZDINXZHINX-NEXT: neg s0, a1
-; RV32IZDINXZHINX-NEXT: neg s1, a2
+; RV32IZDINXZHINX-NEXT: fcvt.s.h s0, a0
+; RV32IZDINXZHINX-NEXT: fle.s a0, zero, s0
+; RV32IZDINXZHINX-NEXT: neg s1, a0
+; RV32IZDINXZHINX-NEXT: mv a0, s0
; RV32IZDINXZHINX-NEXT: call __fixunssfdi
; RV32IZDINXZHINX-NEXT: and a0, s1, a0
+; RV32IZDINXZHINX-NEXT: lui a2, 391168
; RV32IZDINXZHINX-NEXT: and a1, s1, a1
-; RV32IZDINXZHINX-NEXT: or a0, s0, a0
-; RV32IZDINXZHINX-NEXT: or a1, s0, a1
+; RV32IZDINXZHINX-NEXT: addi a2, a2, -1
+; RV32IZDINXZHINX-NEXT: flt.s a2, a2, s0
+; RV32IZDINXZHINX-NEXT: neg a2, a2
+; RV32IZDINXZHINX-NEXT: or a0, a2, a0
+; RV32IZDINXZHINX-NEXT: or a1, a2, a1
; RV32IZDINXZHINX-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IZDINXZHINX-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -3217,18 +3219,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; CHECK32-IZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK32-IZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK32-IZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; CHECK32-IZHINXMIN-NEXT: fcvt.s.h a0, a0
-; CHECK32-IZHINXMIN-NEXT: lui a1, 391168
-; CHECK32-IZHINXMIN-NEXT: addi a1, a1, -1
-; CHECK32-IZHINXMIN-NEXT: fle.s a2, zero, a0
-; CHECK32-IZHINXMIN-NEXT: flt.s a1, a1, a0
-; CHECK32-IZHINXMIN-NEXT: neg s0, a1
-; CHECK32-IZHINXMIN-NEXT: neg s1, a2
+; CHECK32-IZHINXMIN-NEXT: fcvt.s.h s0, a0
+; CHECK32-IZHINXMIN-NEXT: fle.s a0, zero, s0
+; CHECK32-IZHINXMIN-NEXT: neg s1, a0
+; CHECK32-IZHINXMIN-NEXT: mv a0, s0
; CHECK32-IZHINXMIN-NEXT: call __fixunssfdi
; CHECK32-IZHINXMIN-NEXT: and a0, s1, a0
+; CHECK32-IZHINXMIN-NEXT: lui a2, 391168
; CHECK32-IZHINXMIN-NEXT: and a1, s1, a1
-; CHECK32-IZHINXMIN-NEXT: or a0, s0, a0
-; CHECK32-IZHINXMIN-NEXT: or a1, s0, a1
+; CHECK32-IZHINXMIN-NEXT: addi a2, a2, -1
+; CHECK32-IZHINXMIN-NEXT: flt.s a2, a2, s0
+; CHECK32-IZHINXMIN-NEXT: neg a2, a2
+; CHECK32-IZHINXMIN-NEXT: or a0, a2, a0
+; CHECK32-IZHINXMIN-NEXT: or a1, a2, a1
; CHECK32-IZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK32-IZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
@@ -3251,18 +3254,19 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; CHECK32-IZDINXZHINXMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; CHECK32-IZDINXZHINXMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; CHECK32-IZDINXZHINXMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h a0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: lui a1, 391168
-; CHECK32-IZDINXZHINXMIN-NEXT: addi a1, a1, -1
-; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a2, zero, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a1, a1, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: neg s0, a1
-; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a2
+; CHECK32-IZDINXZHINXMIN-NEXT: fcvt.s.h s0, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: fle.s a0, zero, s0
+; CHECK32-IZDINXZHINXMIN-NEXT: neg s1, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: mv a0, s0
; CHECK32-IZDINXZHINXMIN-NEXT: call __fixunssfdi
; CHECK32-IZDINXZHINXMIN-NEXT: and a0, s1, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: lui a2, 391168
; CHECK32-IZDINXZHINXMIN-NEXT: and a1, s1, a1
-; CHECK32-IZDINXZHINXMIN-NEXT: or a0, s0, a0
-; CHECK32-IZDINXZHINXMIN-NEXT: or a1, s0, a1
+; CHECK32-IZDINXZHINXMIN-NEXT: addi a2, a2, -1
+; CHECK32-IZDINXZHINXMIN-NEXT: flt.s a2, a2, s0
+; CHECK32-IZDINXZHINXMIN-NEXT: neg a2, a2
+; CHECK32-IZDINXZHINXMIN-NEXT: or a0, a2, a0
+; CHECK32-IZDINXZHINXMIN-NEXT: or a1, a2, a1
; CHECK32-IZDINXZHINXMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; CHECK32-IZDINXZHINXMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
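The half-convert updates are register allocation only: the fcvt_lu_h_sat blocks now keep the converted single-precision value in s0 across the __fixunssfdi libcall and evaluate the upper-bound flt.s after it returns, while the fcvt_l_h_sat hunks merely renumber the neg temporaries. The source pattern behind the unsigned variant, as a standalone sketch (assumed function name; the intrinsic is LLVM's saturating fp-to-unsigned conversion):

declare i64 @llvm.fptoui.sat.i64.f16(half)

define i64 @fcvt_lu_h_sat_sketch(half %a) {
  ; Saturating half -> u64: negative and NaN inputs yield 0, overflow clamps to max.
  %r = call i64 @llvm.fptoui.sat.i64.f16(half %a)
  ret i64 %r
}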
diff --git a/llvm/test/CodeGen/RISCV/macro-fusions.mir b/llvm/test/CodeGen/RISCV/macro-fusions.mir
index 1346414..ae5b52d 100644
--- a/llvm/test/CodeGen/RISCV/macro-fusions.mir
+++ b/llvm/test/CodeGen/RISCV/macro-fusions.mir
@@ -2,7 +2,12 @@
# RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \
# RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \
# RUN: -mattr=+lui-addi-fusion,+auipc-addi-fusion,+zexth-fusion,+zextw-fusion,+shifted-zextw-fusion,+ld-add-fusion \
+# RUN: -mattr=+add-load-fusion,+auipc-load-fusion,+lui-load-fusion,+addi-load-fusion \
+# RUN: -mattr=+zba,+shxadd-load-fusion \
# RUN: | FileCheck %s
+# RUN: llc -mtriple=riscv64-linux-gnu -x=mir < %s \
+# RUN: -debug-only=machine-scheduler -start-before=machine-scheduler 2>&1 \
+# RUN:   -mattr=+zba,+bfext-fusion | FileCheck --check-prefixes=CHECK-BFEXT %s

# CHECK: lui_addi:%bb.0
# CHECK: Macro fuse: {{.*}}LUI - ADDI
@@ -174,3 +179,1374 @@ body: |
$x11 = COPY %5
PseudoRET
...
+
+# CHECK: add_lb
+# CHECK: Macro fuse: {{.*}}ADD - LB
+---
+name: add_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lh
+# CHECK: Macro fuse: {{.*}}ADD - LH
+---
+name: add_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lw
+# CHECK: Macro fuse: {{.*}}ADD - LW
+---
+name: add_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lbu
+# CHECK: Macro fuse: {{.*}}ADD - LBU
+---
+name: add_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lhu
+# CHECK: Macro fuse: {{.*}}ADD - LHU
+---
+name: add_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: add_lwu
+# CHECK: Macro fuse: {{.*}}ADD - LWU
+---
+name: add_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: auipc_lb
+# CHECK: Macro fuse: {{.*}}AUIPC - LB
+---
+name: auipc_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LB %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lh
+# CHECK: Macro fuse: {{.*}}AUIPC - LH
+---
+name: auipc_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LH %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lw
+# CHECK: Macro fuse: {{.*}}AUIPC - LW
+---
+name: auipc_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LW %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_ld
+# CHECK: Macro fuse: {{.*}}AUIPC - LD
+---
+name: auipc_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LD %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lbu
+# CHECK: Macro fuse: {{.*}}AUIPC - LBU
+---
+name: auipc_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LBU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lhu
+# CHECK: Macro fuse: {{.*}}AUIPC - LHU
+---
+name: auipc_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LHU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: auipc_lwu
+# CHECK: Macro fuse: {{.*}}AUIPC - LWU
+---
+name: auipc_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = AUIPC 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LWU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lb
+# CHECK: Macro fuse: {{.*}}LUI - LB
+---
+name: lui_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LB %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lh
+# CHECK: Macro fuse: {{.*}}LUI - LH
+---
+name: lui_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LH %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lw
+# CHECK: Macro fuse: {{.*}}LUI - LW
+---
+name: lui_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LW %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_ld
+# CHECK: Macro fuse: {{.*}}LUI - LD
+---
+name: lui_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LD %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lbu
+# CHECK: Macro fuse: {{.*}}LUI - LBU
+---
+name: lui_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LBU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lhu
+# CHECK: Macro fuse: {{.*}}LUI - LHU
+---
+name: lui_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LHU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: lui_lwu
+# CHECK: Macro fuse: {{.*}}LUI - LWU
+---
+name: lui_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = LUI 1
+ %3:gpr = XORI %1, 2
+ %4:gpr = LWU %2, 4
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK-BFEXT: bitfield_extract
+# CHECK-BFEXT: Macro fuse: {{.*}}SLLI - SRLI
+---
+name: bitfield_extract
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10
+ %1:gpr = COPY $x10
+ %2:gpr = SLLI %1, 31
+ %3:gpr = XORI %1, 3
+ %4:gpr = SRLI %2, 48
+ $x10 = COPY %3
+ $x11 = COPY %4
+ PseudoRET
+...
+
+# CHECK: addi_lb
+# CHECK: Macro fuse: {{.*}}ADDI - LB
+---
+name: addi_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lh
+# CHECK: Macro fuse: {{.*}}ADDI - LH
+---
+name: addi_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lw
+# CHECK: Macro fuse: {{.*}}ADDI - LW
+---
+name: addi_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_ld
+# CHECK: Macro fuse: {{.*}}ADDI - LD
+---
+name: addi_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lbu
+# CHECK: Macro fuse: {{.*}}ADDI - LBU
+---
+name: addi_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lhu
+# CHECK: Macro fuse: {{.*}}ADDI - LHU
+---
+name: addi_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: addi_lwu
+# CHECK: Macro fuse: {{.*}}ADDI - LWU
+---
+name: addi_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADDI %1, 8
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lb
+# CHECK: Macro fuse: {{.*}}ADD_UW - LB
+---
+name: adduw_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lh
+# CHECK: Macro fuse: {{.*}}ADD_UW - LH
+---
+name: adduw_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lw
+# CHECK: Macro fuse: {{.*}}ADD_UW - LW
+---
+name: adduw_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_ld
+# CHECK: Macro fuse: {{.*}}ADD_UW - LD
+---
+name: adduw_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lbu
+# CHECK: Macro fuse: {{.*}}ADD_UW - LBU
+---
+name: adduw_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lhu
+# CHECK: Macro fuse: {{.*}}ADD_UW - LHU
+---
+name: adduw_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: adduw_lwu
+# CHECK: Macro fuse: {{.*}}ADD_UW - LWU
+---
+name: adduw_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 0
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lb
+# CHECK: Macro fuse: {{.*}}SH1ADD - LB
+---
+name: sh1add_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lb
+# CHECK: Macro fuse: {{.*}}SH2ADD - LB
+---
+name: sh2add_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lb
+# CHECK: Macro fuse: {{.*}}SH3ADD - LB
+---
+name: sh3add_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lh
+# CHECK: Macro fuse: {{.*}}SH1ADD - LH
+---
+name: sh1add_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lh
+# CHECK: Macro fuse: {{.*}}SH2ADD - LH
+---
+name: sh2add_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lh
+# CHECK: Macro fuse: {{.*}}SH3ADD - LH
+---
+name: sh3add_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lw
+# CHECK: Macro fuse: {{.*}}SH1ADD - LW
+---
+name: sh1add_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lw
+# CHECK: Macro fuse: {{.*}}SH2ADD - LW
+---
+name: sh2add_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lw
+# CHECK: Macro fuse: {{.*}}SH3ADD - LW
+---
+name: sh3add_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_ld
+# CHECK: Macro fuse: {{.*}}SH1ADD - LD
+---
+name: sh1add_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_ld
+# CHECK: Macro fuse: {{.*}}SH2ADD - LD
+---
+name: sh2add_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_ld
+# CHECK: Macro fuse: {{.*}}SH3ADD - LD
+---
+name: sh3add_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lbu
+# CHECK: Macro fuse: {{.*}}SH1ADD - LBU
+---
+name: sh1add_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lbu
+# CHECK: Macro fuse: {{.*}}SH2ADD - LBU
+---
+name: sh2add_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lbu
+# CHECK: Macro fuse: {{.*}}SH3ADD - LBU
+---
+name: sh3add_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lhu
+# CHECK: Macro fuse: {{.*}}SH1ADD - LHU
+---
+name: sh1add_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lhu
+# CHECK: Macro fuse: {{.*}}SH2ADD - LHU
+---
+name: sh2add_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lhu
+# CHECK: Macro fuse: {{.*}}SH3ADD - LHU
+---
+name: sh3add_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1add_lwu
+# CHECK: Macro fuse: {{.*}}SH1ADD - LWU
+---
+name: sh1add_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2add_lwu
+# CHECK: Macro fuse: {{.*}}SH2ADD - LWU
+---
+name: sh2add_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3add_lwu
+# CHECK: Macro fuse: {{.*}}SH3ADD - LWU
+---
+name: sh3add_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lb
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LB
+---
+name: sh1adduw_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lb
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LB
+---
+name: sh2adduw_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lb
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LB
+---
+name: sh3adduw_lb
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LB %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lh
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LH
+---
+name: sh1adduw_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lh
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LH
+---
+name: sh2adduw_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lh
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LH
+---
+name: sh3adduw_lh
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LH %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lw
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LW
+---
+name: sh1adduw_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lw
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LW
+---
+name: sh2adduw_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lw
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LW
+---
+name: sh3adduw_lw
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LW %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_ld
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LD
+---
+name: sh1adduw_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_ld
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LD
+---
+name: sh2adduw_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_ld
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LD
+---
+name: sh3adduw_ld
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LD %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lbu
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LBU
+---
+name: sh1adduw_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lbu
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LBU
+---
+name: sh2adduw_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lbu
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LBU
+---
+name: sh3adduw_lbu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LBU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lhu
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LHU
+---
+name: sh1adduw_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lhu
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LHU
+---
+name: sh2adduw_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lhu
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LHU
+---
+name: sh3adduw_lhu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LHU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh1adduw_lwu
+# CHECK: Macro fuse: {{.*}}SH1ADD_UW - LWU
+---
+name: sh1adduw_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH1ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh2adduw_lwu
+# CHECK: Macro fuse: {{.*}}SH2ADD_UW - LWU
+---
+name: sh2adduw_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH2ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
+
+# CHECK: sh3adduw_lwu
+# CHECK: Macro fuse: {{.*}}SH3ADD_UW - LWU
+---
+name: sh3adduw_lwu
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ %1:gpr = COPY $x10
+ %2:gpr = COPY $x11
+ %3:gpr = SH3ADD_UW %1, %2
+ %4:gpr = XORI %2, 3
+ %5:gpr = LWU %3, 8
+ $x10 = COPY %4
+ $x11 = COPY %5
+ PseudoRET
+...
diff --git a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
index 160f0ae..abdc1ba 100644
--- a/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
+++ b/llvm/test/CodeGen/RISCV/misched-load-clustering.ll
@@ -1,17 +1,42 @@
; REQUIRES: asserts
-; RUN: llc -mtriple=riscv32 -verify-misched -riscv-misched-load-store-clustering=false \
+;
+; Disable all misched clustering
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
; RUN: | FileCheck -check-prefix=NOCLUSTER %s
-; RUN: llc -mtriple=riscv64 -verify-misched -riscv-misched-load-store-clustering=false \
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
; RUN: | FileCheck -check-prefix=NOCLUSTER %s
+;
+; ST misched clustering only
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=STCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=STCLUSTER %s
+;
+; LD misched clustering only
; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-store-clustering \
; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
; RUN: | FileCheck -check-prefix=LDCLUSTER %s
; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-store-clustering \
; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
; RUN: | FileCheck -check-prefix=LDCLUSTER %s
-
+;
+; Default misched cluster settings (i.e. both LD and ST clustering)
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s
define i32 @load_clustering_1(ptr nocapture %p) {
; NOCLUSTER: ********** MI Scheduling **********
@@ -22,6 +47,14 @@ define i32 @load_clustering_1(ptr nocapture %p) {
; NOCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
; NOCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
;
+; STCLUSTER: ********** MI Scheduling **********
+; STCLUSTER-LABEL: load_clustering_1:%bb.0
+; STCLUSTER: *** Final schedule for %bb.0 ***
+; STCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
+; STCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
+; STCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
+; STCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
+;
; LDCLUSTER: ********** MI Scheduling **********
; LDCLUSTER-LABEL: load_clustering_1:%bb.0
; LDCLUSTER: *** Final schedule for %bb.0 ***
@@ -29,6 +62,14 @@ define i32 @load_clustering_1(ptr nocapture %p) {
; LDCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
; LDCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
; LDCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
+;
+; DEFAULTCLUSTER: ********** MI Scheduling **********
+; DEFAULTCLUSTER-LABEL: load_clustering_1:%bb.0
+; DEFAULTCLUSTER: *** Final schedule for %bb.0 ***
+; DEFAULTCLUSTER: SU(4): %4:gpr = LW %0:gpr, 4
+; DEFAULTCLUSTER: SU(2): %2:gpr = LW %0:gpr, 8
+; DEFAULTCLUSTER: SU(1): %1:gpr = LW %0:gpr, 12
+; DEFAULTCLUSTER: SU(5): %6:gpr = LW %0:gpr, 16
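+; (With load clustering enabled, the four loads are scheduled in ascending
+; offset order: 4, 8, 12, 16.)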
entry:
%arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3
%val0 = load i32, ptr %arrayidx0
diff --git a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir
index 21398d3..01960f9 100644
--- a/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir
+++ b/llvm/test/CodeGen/RISCV/misched-mem-clustering.mir
@@ -1,10 +1,12 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
# RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -verify-misched -enable-post-misched=false \
-# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \
+# RUN: -mattr=+disable-postmisched-load-clustering \
+# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \
# RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \
# RUN: | FileCheck -check-prefix=NOPOSTMISCHED %s
# RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \
-# RUN: -riscv-postmisched-load-store-clustering=false -debug-only=machine-scheduler \
+# RUN: -mattr=+disable-postmisched-load-clustering \
+# RUN: -mattr=+disable-postmisched-store-clustering -debug-only=machine-scheduler \
# RUN: -start-before=machine-scheduler -stop-after=postmisched -misched-regpressure=false -o - 2>&1 < %s \
# RUN: | FileCheck -check-prefix=NOCLUSTER %s
# RUN: llc -mtriple=riscv64 -x mir -mcpu=sifive-p470 -mattr=+use-postra-scheduler -verify-misched -enable-post-misched=true \
diff --git a/llvm/test/CodeGen/RISCV/misched-store-clustering.ll b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll
new file mode 100644
index 0000000..02e853d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/misched-store-clustering.ll
@@ -0,0 +1,83 @@
+; REQUIRES: asserts
+;
+; Disable all misched clustering
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=NOCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering,+disable-misched-store-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=NOCLUSTER %s
+;
+; ST misched clustering only
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=STCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-load-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=STCLUSTER %s
+;
+; LD misched clustering only
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -mattr=+disable-misched-store-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=LDCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -mattr=+disable-misched-store-clustering \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=LDCLUSTER %s
+;
+; Default misched cluster settings (i.e. both LD and ST clustering)
+; RUN: llc -mtriple=riscv32 -verify-misched \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s
+; RUN: llc -mtriple=riscv64 -verify-misched \
+; RUN: -debug-only=machine-scheduler -o - 2>&1 < %s \
+; RUN: | FileCheck -check-prefix=DEFAULTCLUSTER %s
+
+define i32 @store_clustering_1(ptr nocapture %p, i32 %v) {
+; NOCLUSTER: ********** MI Scheduling **********
+; NOCLUSTER-LABEL: store_clustering_1:%bb.0
+; NOCLUSTER: *** Final schedule for %bb.0 ***
+; NOCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0)
+; NOCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1)
+; NOCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2)
+; NOCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3)
+;
+; STCLUSTER: ********** MI Scheduling **********
+; STCLUSTER-LABEL: store_clustering_1:%bb.0
+; STCLUSTER: *** Final schedule for %bb.0 ***
+; STCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2)
+; STCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1)
+; STCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0)
+; STCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3)
+;
+; LDCLUSTER: ********** MI Scheduling **********
+; LDCLUSTER-LABEL: store_clustering_1:%bb.0
+; LDCLUSTER: *** Final schedule for %bb.0 ***
+; LDCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0)
+; LDCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1)
+; LDCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2)
+; LDCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3)
+;
+; DEFAULTCLUSTER: ********** MI Scheduling **********
+; DEFAULTCLUSTER-LABEL: store_clustering_1:%bb.0
+; DEFAULTCLUSTER: *** Final schedule for %bb.0 ***
+; DEFAULTCLUSTER: SU(4): SW %1:gpr, %0:gpr, 4 :: (store (s32) into %ir.arrayidx2)
+; DEFAULTCLUSTER: SU(3): SW %1:gpr, %0:gpr, 8 :: (store (s32) into %ir.arrayidx1)
+; DEFAULTCLUSTER: SU(2): SW %1:gpr, %0:gpr, 12 :: (store (s32) into %ir.arrayidx0)
+; DEFAULTCLUSTER: SU(5): SW %1:gpr, %0:gpr, 16 :: (store (s32) into %ir.arrayidx3)
+entry:
+ %arrayidx0 = getelementptr inbounds i32, ptr %p, i32 3
+ store i32 %v, ptr %arrayidx0
+ %arrayidx1 = getelementptr inbounds i32, ptr %p, i32 2
+ store i32 %v, ptr %arrayidx1
+ %arrayidx2 = getelementptr inbounds i32, ptr %p, i32 1
+ store i32 %v, ptr %arrayidx2
+ %arrayidx3 = getelementptr inbounds i32, ptr %p, i32 4
+ store i32 %v, ptr %arrayidx3
+ ret i32 %v
+}
diff --git a/llvm/test/CodeGen/RISCV/rv32zbkb.ll b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
index 7ebbd78..42d326e 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbkb.ll
@@ -350,10 +350,43 @@ define i32 @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2,
ret i32 %j
}
+define i32 @pack_lo_packh_hi_packh_2(i8 %0, i8 %1, i8 %2, i8 %3) nounwind {
+; RV32I-LABEL: pack_lo_packh_hi_packh_2:
+; RV32I: # %bb.0:
+; RV32I-NEXT: zext.b a0, a0
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: zext.b a2, a2
+; RV32I-NEXT: slli a3, a3, 24
+; RV32I-NEXT: slli a1, a1, 8
+; RV32I-NEXT: slli a2, a2, 16
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: or a2, a2, a3
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: ret
+;
+; RV32ZBKB-LABEL: pack_lo_packh_hi_packh_2:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a0, a0, a1
+; RV32ZBKB-NEXT: packh a1, a2, a3
+; RV32ZBKB-NEXT: pack a0, a0, a1
+; RV32ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %h, %f
+ %j = or i32 %i, %g
+ ret i32 %j
+}
+
define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2) nounwind {
; RV32I-LABEL: pack_lo_zext_hi_packh:
; RV32I: # %bb.0:
-; RV32I-NEXT: slli a1, a2, 16
+; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: slli a2, a2, 24
; RV32I-NEXT: or a1, a2, a1
; RV32I-NEXT: or a0, a1, a0
@@ -361,14 +394,14 @@ define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2)
;
; RV32ZBKB-LABEL: pack_lo_zext_hi_packh:
; RV32ZBKB: # %bb.0:
-; RV32ZBKB-NEXT: packh a1, a2, a2
+; RV32ZBKB-NEXT: packh a1, a1, a2
; RV32ZBKB-NEXT: pack a0, a0, a1
; RV32ZBKB-NEXT: ret
%a = zext i16 %0 to i32
%b = zext i8 %1 to i32
%c = zext i8 %2 to i32
%d = shl i32 %c, 8
- %e = or i32 %c, %d
+ %e = or i32 %b, %d
%f = shl i32 %e, 16
%g = or i32 %f, %a
ret i32 %g
@@ -379,7 +412,7 @@ define i32 @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2)
define i32 @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2) nounwind {
; RV32I-LABEL: pack_lo_noext_hi_packh:
; RV32I: # %bb.0:
-; RV32I-NEXT: slli a1, a2, 16
+; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: slli a2, a2, 24
; RV32I-NEXT: or a1, a2, a1
; RV32I-NEXT: or a0, a1, a0
@@ -387,14 +420,40 @@ define i32 @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2) nounwin
;
; RV32ZBKB-LABEL: pack_lo_noext_hi_packh:
; RV32ZBKB: # %bb.0:
-; RV32ZBKB-NEXT: packh a1, a2, a2
+; RV32ZBKB-NEXT: packh a1, a1, a2
+; RV32ZBKB-NEXT: slli a1, a1, 16
+; RV32ZBKB-NEXT: or a0, a1, a0
+; RV32ZBKB-NEXT: ret
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ ret i32 %g
+}
+
+; Make sure we can match packh+slli without having the input bytes zero extended.
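+; (packh reads only the low 8 bits of each source register, so Zbkb avoids
+; the explicit zext.b instructions the RV32I lowering needs.)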
+define i32 @pack_lo_noext_hi_packh_nozeroext(i32 %a, i8 %1, i8 %2) nounwind {
+; RV32I-LABEL: pack_lo_noext_hi_packh_nozeroext:
+; RV32I: # %bb.0:
+; RV32I-NEXT: zext.b a1, a1
+; RV32I-NEXT: slli a2, a2, 24
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: or a0, a2, a0
+; RV32I-NEXT: or a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBKB-LABEL: pack_lo_noext_hi_packh_nozeroext:
+; RV32ZBKB: # %bb.0:
+; RV32ZBKB-NEXT: packh a1, a1, a2
; RV32ZBKB-NEXT: slli a1, a1, 16
; RV32ZBKB-NEXT: or a0, a1, a0
; RV32ZBKB-NEXT: ret
%b = zext i8 %1 to i32
%c = zext i8 %2 to i32
%d = shl i32 %c, 8
- %e = or i32 %c, %d
+ %e = or i32 %b, %d
%f = shl i32 %e, 16
%g = or i32 %f, %a
ret i32 %g
diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
index 57061e1..f89d1abf 100644
--- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
@@ -253,8 +253,8 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind {
; RV64IZHINX-NEXT: srli a1, a2, 1
; RV64IZHINX-NEXT: .LBB4_4:
; RV64IZHINX-NEXT: feq.s a2, s0, s0
-; RV64IZHINX-NEXT: neg a3, a3
; RV64IZHINX-NEXT: neg a4, s1
+; RV64IZHINX-NEXT: neg a3, a3
; RV64IZHINX-NEXT: neg a2, a2
; RV64IZHINX-NEXT: and a0, a4, a0
; RV64IZHINX-NEXT: and a1, a2, a1
@@ -334,18 +334,19 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind {
; RV64IZHINX-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64IZHINX-NEXT: fcvt.s.h a0, a0
-; RV64IZHINX-NEXT: lui a1, 522240
-; RV64IZHINX-NEXT: addi a1, a1, -1
-; RV64IZHINX-NEXT: fle.s a2, zero, a0
-; RV64IZHINX-NEXT: flt.s a1, a1, a0
-; RV64IZHINX-NEXT: neg s0, a1
-; RV64IZHINX-NEXT: neg s1, a2
+; RV64IZHINX-NEXT: fcvt.s.h s0, a0
+; RV64IZHINX-NEXT: fle.s a0, zero, s0
+; RV64IZHINX-NEXT: neg s1, a0
+; RV64IZHINX-NEXT: mv a0, s0
; RV64IZHINX-NEXT: call __fixunssfti
; RV64IZHINX-NEXT: and a0, s1, a0
+; RV64IZHINX-NEXT: lui a2, 522240
; RV64IZHINX-NEXT: and a1, s1, a1
-; RV64IZHINX-NEXT: or a0, s0, a0
-; RV64IZHINX-NEXT: or a1, s0, a1
+; RV64IZHINX-NEXT: addi a2, a2, -1
+; RV64IZHINX-NEXT: flt.s a2, a2, s0
+; RV64IZHINX-NEXT: neg a2, a2
+; RV64IZHINX-NEXT: or a0, a2, a0
+; RV64IZHINX-NEXT: or a1, a2, a1
; RV64IZHINX-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
index 818ea72..f2c41db 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -392,3 +392,217 @@ define i64 @zext_i16_to_i64(i16 %a) nounwind {
%1 = zext i16 %a to i64
ret i64 %1
}
+
+define void @pack_lo_packh_hi_packh(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_packh_hi_packh:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 8
+; RV64I-NEXT: slli a2, a2, 16
+; RV64I-NEXT: slli a3, a3, 24
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a2, a2, a3
+; RV64I-NEXT: or a0, a0, a2
+; RV64I-NEXT: sw a0, 0(a4)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_packh_hi_packh:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: packh a1, a2, a3
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: sw a0, 0(a4)
+; RV64ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %h, %f
+ %j = or i32 %i, %g
+ store i32 %j, ptr %p
+ ret void
+}
+
+define void @pack_lo_packh_hi_packh_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2, i8 zeroext %3, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_packh_hi_packh_2:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 8
+; RV64I-NEXT: slli a2, a2, 16
+; RV64I-NEXT: slli a3, a3, 24
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a2, a2, a3
+; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: sw a0, 0(a4)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_2:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: packh a1, a3, a2
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: sw a0, 0(a4)
+; RV64ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %g, %h
+ %j = or i32 %f, %i
+ store i32 %j, ptr %p
+ ret void
+}
+
+define void @pack_lo_packh_hi_packh_3(i8 %0, i8 %1, i8 %2, i8 %3, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_packh_hi_packh_3:
+; RV64I: # %bb.0:
+; RV64I-NEXT: zext.b a0, a0
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a2, a2
+; RV64I-NEXT: slli a3, a3, 24
+; RV64I-NEXT: slli a1, a1, 8
+; RV64I-NEXT: slli a2, a2, 16
+; RV64I-NEXT: or a0, a3, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: sw a0, 0(a4)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_3:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a0, a0, a1
+; RV64ZBKB-NEXT: packh a1, a3, a2
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: sw a0, 0(a4)
+; RV64ZBKB-NEXT: ret
+ %a = zext i8 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = zext i8 %3 to i32
+ %e = shl i32 %b, 8
+ %f = shl i32 %c, 16
+ %g = shl i32 %d, 24
+ %h = or i32 %a, %e
+ %i = or i32 %g, %h
+ %j = or i32 %f, %i
+ store i32 %j, ptr %p
+ ret void
+}
+
+define void @pack_lo_zext_hi_packh(i16 zeroext %0, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_zext_hi_packh:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: slli a2, a2, 24
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: sw a0, 0(a3)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_zext_hi_packh:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a1, a1, a2
+; RV64ZBKB-NEXT: packw a0, a0, a1
+; RV64ZBKB-NEXT: sw a0, 0(a3)
+; RV64ZBKB-NEXT: ret
+ %a = zext i16 %0 to i32
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ store i32 %g, ptr %p
+ ret void
+}
+
+; Negative test: %a isn't extended, so we can't use packw for the outer or, but
+; we can use packh for the high half.
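+; (packw builds its result from only the low 16 bits of each source register,
+; so it would discard the upper half of %a instead of or'ing into it.)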
+define void @pack_lo_noext_hi_packh(i32 %a, i8 zeroext %1, i8 zeroext %2, ptr %p) nounwind {
+; RV64I-LABEL: pack_lo_noext_hi_packh:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: slli a2, a2, 24
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: sw a0, 0(a3)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_lo_noext_hi_packh:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a1, a1, a2
+; RV64ZBKB-NEXT: slli a1, a1, 16
+; RV64ZBKB-NEXT: or a0, a1, a0
+; RV64ZBKB-NEXT: sw a0, 0(a3)
+; RV64ZBKB-NEXT: ret
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ store i32 %g, ptr %p
+ ret void
+}
+
+; Make sure we can match packh+slli without having the input bytes zero extended.
+define void @pack_i32_lo_noext_hi_packh_nozeroext(i32 %a, i8 %1, i8 %2, ptr %p) nounwind {
+; RV64I-LABEL: pack_i32_lo_noext_hi_packh_nozeroext:
+; RV64I: # %bb.0:
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: slli a2, a2, 24
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: or a0, a0, a1
+; RV64I-NEXT: sw a0, 0(a3)
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_i32_lo_noext_hi_packh_nozeroext:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a1, a1, a2
+; RV64ZBKB-NEXT: slli a1, a1, 16
+; RV64ZBKB-NEXT: or a0, a1, a0
+; RV64ZBKB-NEXT: sw a0, 0(a3)
+; RV64ZBKB-NEXT: ret
+ %b = zext i8 %1 to i32
+ %c = zext i8 %2 to i32
+ %d = shl i32 %c, 8
+ %e = or i32 %b, %d
+ %f = shl i32 %e, 16
+ %g = or i32 %f, %a
+ store i32 %g, ptr %p
+ ret void
+}
+
+; Make sure we can match packh+slli without having the input bytes zero extended.
+define i64 @pack_i64_lo_noext_hi_packh_nozeroext(i64 %a, i8 %1, i8 %2, ptr %p) nounwind {
+; RV64I-LABEL: pack_i64_lo_noext_hi_packh_nozeroext:
+; RV64I: # %bb.0:
+; RV64I-NEXT: zext.b a1, a1
+; RV64I-NEXT: zext.b a2, a2
+; RV64I-NEXT: slli a1, a1, 16
+; RV64I-NEXT: slli a2, a2, 24
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: ret
+;
+; RV64ZBKB-LABEL: pack_i64_lo_noext_hi_packh_nozeroext:
+; RV64ZBKB: # %bb.0:
+; RV64ZBKB-NEXT: packh a1, a1, a2
+; RV64ZBKB-NEXT: slli a1, a1, 16
+; RV64ZBKB-NEXT: or a0, a1, a0
+; RV64ZBKB-NEXT: ret
+ %b = zext i8 %1 to i64
+ %c = zext i8 %2 to i64
+ %d = shl i64 %c, 8
+ %e = or i64 %b, %d
+ %f = shl i64 %e, 16
+ %g = or i64 %f, %a
+ ret i64 %g
+}
diff --git a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
index c9c49e8..cb046cd 100644
--- a/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/unaligned-load-store.ll
@@ -204,18 +204,16 @@ define i64 @load_i64(ptr %p) {
; RV64IZBKB-NEXT: lbu a2, 5(a0)
; RV64IZBKB-NEXT: lbu a3, 6(a0)
; RV64IZBKB-NEXT: lbu a4, 7(a0)
-; RV64IZBKB-NEXT: lbu a5, 0(a0)
-; RV64IZBKB-NEXT: lbu a6, 1(a0)
-; RV64IZBKB-NEXT: lbu a7, 2(a0)
-; RV64IZBKB-NEXT: lbu a0, 3(a0)
+; RV64IZBKB-NEXT: lbu a5, 1(a0)
+; RV64IZBKB-NEXT: lbu a6, 2(a0)
+; RV64IZBKB-NEXT: lbu a7, 3(a0)
+; RV64IZBKB-NEXT: lbu a0, 0(a0)
+; RV64IZBKB-NEXT: packh a3, a3, a4
; RV64IZBKB-NEXT: packh a1, a1, a2
-; RV64IZBKB-NEXT: packh a2, a3, a4
-; RV64IZBKB-NEXT: packh a3, a5, a6
-; RV64IZBKB-NEXT: packh a0, a7, a0
-; RV64IZBKB-NEXT: slli a2, a2, 16
-; RV64IZBKB-NEXT: slli a0, a0, 16
-; RV64IZBKB-NEXT: or a1, a2, a1
-; RV64IZBKB-NEXT: or a0, a0, a3
+; RV64IZBKB-NEXT: packh a2, a6, a7
+; RV64IZBKB-NEXT: packh a0, a0, a5
+; RV64IZBKB-NEXT: packw a1, a1, a3
+; RV64IZBKB-NEXT: packw a0, a0, a2
; RV64IZBKB-NEXT: pack a0, a0, a1
; RV64IZBKB-NEXT: ret
;
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
new file mode 100644
index 0000000..00e9185
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/ImplicitBinding.ll
@@ -0,0 +1,75 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-vulkan1.3-library %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-vulkan1.3-library %s -o - -filetype=obj | spirv-val --target-env vulkan1.3 %}
+
+@.str = private unnamed_addr constant [2 x i8] c"b\00", align 1
+@.str.2 = private unnamed_addr constant [2 x i8] c"c\00", align 1
+@.str.4 = private unnamed_addr constant [2 x i8] c"d\00", align 1
+@.str.6 = private unnamed_addr constant [2 x i8] c"e\00", align 1
+@.str.8 = private unnamed_addr constant [2 x i8] c"f\00", align 1
+@.str.10 = private unnamed_addr constant [2 x i8] c"g\00", align 1
+@.str.12 = private unnamed_addr constant [2 x i8] c"h\00", align 1
+@.str.14 = private unnamed_addr constant [2 x i8] c"i\00", align 1
+
+; CHECK-DAG: OpName [[b:%[0-9]+]] "b"
+; CHECK-DAG: OpName [[c:%[0-9]+]] "c"
+; CHECK-DAG: OpName [[d:%[0-9]+]] "d"
+; CHECK-DAG: OpName [[e:%[0-9]+]] "e"
+; CHECK-DAG: OpName [[f:%[0-9]+]] "f"
+; CHECK-DAG: OpName [[g:%[0-9]+]] "g"
+; CHECK-DAG: OpName [[h:%[0-9]+]] "h"
+; CHECK-DAG: OpName [[i:%[0-9]+]] "i"
+; CHECK-DAG: OpDecorate [[b]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[b]] Binding 1
+; CHECK-DAG: OpDecorate [[c]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[c]] Binding 0
+; CHECK-DAG: OpDecorate [[d]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[d]] Binding 3
+; CHECK-DAG: OpDecorate [[e]] DescriptorSet 0
+; CHECK-DAG: OpDecorate [[e]] Binding 2
+; CHECK-DAG: OpDecorate [[f]] DescriptorSet 10
+; CHECK-DAG: OpDecorate [[f]] Binding 1
+; CHECK-DAG: OpDecorate [[g]] DescriptorSet 10
+; CHECK-DAG: OpDecorate [[g]] Binding 0
+; CHECK-DAG: OpDecorate [[h]] DescriptorSet 10
+; CHECK-DAG: OpDecorate [[h]] Binding 3
+; CHECK-DAG: OpDecorate [[i]] DescriptorSet 10
+; CHECK-DAG: OpDecorate [[i]] Binding 2
+
+
+define void @main() local_unnamed_addr #0 {
+entry:
+ %0 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str)
+ %1 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.2)
+ %2 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 1, i32 0, i32 1, i32 0, i1 false, ptr nonnull @.str.4)
+ %3 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 0, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.6)
+ %4 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 1, i32 1, i32 0, i1 false, ptr nonnull @.str.8)
+ %5 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 2, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.10)
+ %6 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 3, i32 10, i32 1, i32 0, i1 false, ptr nonnull @.str.12)
+ %7 = tail call target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) @llvm.spv.resource.handlefrombinding.tspirv.SignedImage_i32_5_2_0_0_2_0t(i32 10, i32 2, i32 1, i32 0, i1 false, ptr nonnull @.str.14)
+ %8 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %1, i32 0)
+ %9 = load i32, ptr addrspace(11) %8, align 4
+ %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %2, i32 0)
+ %11 = load i32, ptr addrspace(11) %10, align 4
+ %add.i = add nsw i32 %11, %9
+ %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %3, i32 0)
+ %13 = load i32, ptr addrspace(11) %12, align 4
+ %add4.i = add nsw i32 %add.i, %13
+ %14 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %4, i32 0)
+ %15 = load i32, ptr addrspace(11) %14, align 4
+ %add6.i = add nsw i32 %add4.i, %15
+ %16 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %5, i32 0)
+ %17 = load i32, ptr addrspace(11) %16, align 4
+ %add8.i = add nsw i32 %add6.i, %17
+ %18 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %6, i32 0)
+ %19 = load i32, ptr addrspace(11) %18, align 4
+ %add10.i = add nsw i32 %add8.i, %19
+ %20 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %7, i32 0)
+ %21 = load i32, ptr addrspace(11) %20, align 4
+ %add12.i = add nsw i32 %add10.i, %21
+ %22 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.SignedImage_i32_5_2_0_0_2_0t(target("spirv.SignedImage", i32, 5, 2, 0, 0, 2, 0) %0, i32 0)
+ store i32 %add12.i, ptr addrspace(11) %22, align 4
+ ret void
+}
+
+
+attributes #0 = { "hlsl.numthreads"="1,1,1" "hlsl.shader"="compute" }
\ No newline at end of file
diff --git a/llvm/test/CodeGen/WebAssembly/ref-test-func.ll b/llvm/test/CodeGen/WebAssembly/ref-test-func.ll
index ea2453f..4fda253 100644
--- a/llvm/test/CodeGen/WebAssembly/ref-test-func.ll
+++ b/llvm/test/CodeGen/WebAssembly/ref-test-func.ll
@@ -31,7 +31,7 @@ define void @test_fpsig_return_i32(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 0)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -48,7 +48,7 @@ define void @test_fpsig_return_i64(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i64 0)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i64 poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -65,7 +65,7 @@ define void @test_fpsig_return_f32(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float 0.)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, float poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -82,7 +82,7 @@ define void @test_fpsig_return_f64(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double 0.)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, double poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -100,7 +100,7 @@ define void @test_fpsig_param_i32(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, double 0.)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, token poison, double poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -118,7 +118,7 @@ define void @test_fpsig_multiple_params_and_returns(ptr noundef %func) local_unn
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 0, i64 0, float 0., double 0., token poison, i64 0, float 0., i64 0)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, i32 poison, i64 poison, float poison, double poison, token poison, i64 poison, float poison, i64 poison)
tail call void @use(i32 noundef %res) #3
ret void
}
@@ -137,10 +137,26 @@ define void @test_fpsig_ptrs(ptr noundef %func) local_unnamed_addr #0 {
; CHECK-NEXT: call use
; CHECK-NEXT: # fallthrough-return
entry:
- %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr null, token poison, ptr null, ptr null)
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr poison, token poison, ptr poison, ptr poison)
tail call void @use(i32 noundef %res) #3
ret void
}
+define void @test_reference_types(ptr noundef %func) local_unnamed_addr #0 {
+; CHECK-LABEL: test_reference_types:
+; CHK32: .functype test_reference_types (i32) -> ()
+; CHK64: .functype test_reference_types (i64) -> ()
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHK64-NEXT: i32.wrap_i64
+; CHECK-NEXT: table.get __indirect_function_table
+; CHECK-NEXT: ref.test (funcref, externref) -> (externref)
+; CHECK-NEXT: call use
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %res = tail call i32 (ptr, ...) @llvm.wasm.ref.test.func(ptr %func, ptr addrspace(10) poison, token poison, ptr addrspace(20) poison, ptr addrspace(10) poison)
+ tail call void @use(i32 noundef %res) #3
+ ret void
+}
declare void @use(i32 noundef) local_unnamed_addr #1
diff --git a/llvm/test/DebugInfo/X86/DW_AT_alloc_type.ll b/llvm/test/DebugInfo/X86/DW_AT_alloc_type.ll
new file mode 100644
index 0000000..33028f2
--- /dev/null
+++ b/llvm/test/DebugInfo/X86/DW_AT_alloc_type.ll
@@ -0,0 +1,34 @@
+; RUN: llc -O3 -o %t -filetype=obj %s
+; RUN: llvm-dwarfdump %t | FileCheck %s
+
+; based on clang++ output for `int *alloc_int() { return new int; }`
+
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define dso_local ptr @alloc_int() !dbg !3 {
+; CHECK: DW_TAG_subprogram
+entry:
+ %call = call ptr @alloc(i64 noundef 4), !heapallocsite !7
+; CHECK: DW_TAG_call_site
+; CHECK: DW_AT_LLVM_alloc_type ([[ALLOCSITE:.*]])
+ ret ptr %call
+}
+
+; CHECK: {{.*}}[[ALLOCSITE]]: DW_TAG_base_type
+; CHECK: DW_AT_name ("int")
+
+declare dso_local ptr @alloc(i64 noundef)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2,!8}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, emissionKind: FullDebug)
+!1 = !DIFile(filename: "a.cpp", directory: "/")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "alloc_int", scope: !1, file: !1, line: 1, type: !4, scopeLine: 1, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition, unit: !0)
+!4 = !DISubroutineType(types: !5)
+!5 = !{!6}
+!6 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !7, size: 64)
+!7 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!8 = !{i32 2, !"Dwarf Version", i32 5}
diff --git a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
index c53b006..fc72631 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
@@ -74,3 +74,56 @@ loop:
exit:
ret void
}
+
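+; The instrumentation clears 8 shadow bytes per application byte at
+; ((addr & app_mem_mask) << 3) + shadow_base, recomputing the n * 4 byte size
+; at the alloca and again at each lifetime marker.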
+define void @dynamic_alloca_lifetime_test(i1 %c, i64 %n) sanitize_type {
+; CHECK-LABEL: @dynamic_alloca_lifetime_test(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[APP_MEM_MASK:%.*]] = load i64, ptr @__tysan_app_memory_mask, align 8
+; CHECK-NEXT: [[SHADOW_BASE:%.*]] = load i64, ptr @__tysan_shadow_memory_address, align 8
+; CHECK-NEXT: [[X:%.*]] = alloca i32, i64 [[N:%.*]], align 1
+; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[APP_MEM_MASK]]
+; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP3]], [[SHADOW_BASE]]
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
+; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP0]], 3
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP5]], i8 0, i64 [[TMP6]], i1 false)
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[TMP8:%.*]] = ptrtoint ptr [[X]] to i64
+; CHECK-NEXT: [[TMP9:%.*]] = and i64 [[TMP8]], [[APP_MEM_MASK]]
+; CHECK-NEXT: [[TMP10:%.*]] = shl i64 [[TMP9]], 3
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[TMP10]], [[SHADOW_BASE]]
+; CHECK-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP11]] to ptr
+; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[TMP7]], 3
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP12]], i8 0, i64 [[TMP13]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 -1, ptr [[X]])
+; CHECK-NEXT: call void @alloca_test_use(ptr [[X]])
+; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr [[X]] to i64
+; CHECK-NEXT: [[TMP16:%.*]] = and i64 [[TMP15]], [[APP_MEM_MASK]]
+; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 3
+; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], [[SHADOW_BASE]]
+; CHECK-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP18]] to ptr
+; CHECK-NEXT: [[TMP20:%.*]] = shl i64 [[TMP14]], 3
+; CHECK-NEXT: call void @llvm.memset.p0.i64(ptr align 8 [[TMP19]], i8 0, i64 [[TMP20]], i1 false)
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 -1, ptr [[X]])
+; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ %x = alloca i32, i64 %n, align 1
+ br label %loop
+
+loop:
+ call void @llvm.lifetime.start.p0(i64 -1, ptr %x)
+ call void @alloca_test_use(ptr %x)
+ call void @llvm.lifetime.end.p0(i64 -1, ptr %x)
+ br i1 %c, label %loop, label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s b/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
index f1641fc..b46189b 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_ds.s
@@ -1,6 +1,1917 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s | FileCheck --check-prefixes=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR %s
+ds_nop
+// GFX1250: ds_nop ; encoding: [0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00]
+
+ds_add_f32 v1, v2
+// GFX1250: ds_add_f32 v1, v2 ; encoding: [0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v1, v2 offset:65535
+// GFX1250: ds_add_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x54,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v1, v2 offset:0
+// GFX1250: ds_add_f32 v1, v2 ; encoding: [0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_f32 v255, v255 offset:4
+// GFX1250: ds_add_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x54,0xd8,0xff,0xff,0x00,0x00]
+
+ds_add_rtn_f32 v5, v1, v2
+// GFX1250: ds_add_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xe4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2 offset:65535
+// GFX1250: ds_add_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v5, v1, v2 offset:0
+// GFX1250: ds_add_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xe4,0xd9,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_f32 v255, v255, v255 offset:4
+// GFX1250: ds_add_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd9,0xff,0xff,0x00,0xff]
+
+ds_add_rtn_u32 v5, v1, v2
+// GFX1250: ds_add_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_add_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_add_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+
+ds_add_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_add_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xd8,0xff,0xff,0x00,0xff]
+
+ds_add_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x06]
+
+ds_add_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x80,0xd9,0x01,0x02,0x00,0x06]
+
+ds_add_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x06]
+
+ds_add_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_add_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x80,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_add_u32 v1, v2
+// GFX1250: ds_add_u32 v1, v2 ; encoding: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:65535
+// GFX1250: ds_add_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v1, v2 offset:0
+// GFX1250: ds_add_u32 v1, v2 ; encoding: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+
+ds_add_u32 v255, v255 offset:4
+// GFX1250: ds_add_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x00,0xd8,0xff,0xff,0x00,0x00]
+
+ds_add_u64 v1, v[2:3]
+// GFX1250: ds_add_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_add_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v1, v[2:3] offset:0
+// GFX1250: ds_add_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+
+ds_add_u64 v255, v[254:255] offset:4
+// GFX1250: ds_add_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x00,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_and_b32 v1, v2
+// GFX1250: ds_and_b32 v1, v2 ; encoding: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:65535
+// GFX1250: ds_and_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v1, v2 offset:0
+// GFX1250: ds_and_b32 v1, v2 ; encoding: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+
+ds_and_b32 v255, v255 offset:4
+// GFX1250: ds_and_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x24,0xd8,0xff,0xff,0x00,0x00]
+
+ds_and_b64 v1, v[2:3]
+// GFX1250: ds_and_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_and_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v1, v[2:3] offset:0
+// GFX1250: ds_and_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+
+ds_and_b64 v255, v[254:255] offset:4
+// GFX1250: ds_and_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x24,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_and_rtn_b32 v5, v1, v2
+// GFX1250: ds_and_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_and_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_and_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_and_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_and_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa4,0xd8,0xff,0xff,0x00,0xff]
+
+ds_and_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_and_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_and_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_and_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_and_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa4,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_append v5
+// GFX1250: ds_append v5 ; encoding: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v5 offset:65535
+// GFX1250: ds_append v5 offset:65535 ; encoding: [0xff,0xff,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v5 offset:0
+// GFX1250: ds_append v5 ; encoding: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+
+ds_append v255 offset:4
+// GFX1250: ds_append v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xd8,0x00,0x00,0x00,0xff]
+
+ds_bpermute_b32 v5, v1, v2
+// GFX1250: ds_bpermute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xda,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_bpermute_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xcc,0xda,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v5, v1, v2 offset:0
+// GFX1250: ds_bpermute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xda,0x01,0x02,0x00,0x05]
+
+ds_bpermute_b32 v255, v255, v255 offset:4
+// GFX1250: ds_bpermute_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xcc,0xda,0xff,0xff,0x00,0xff]
+
+ds_cmpstore_b32 v1, v2, v3
+// GFX1250: ds_cmpstore_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpstore_b32 v1, v2, v3 offset:65535
+// GFX1250: ds_cmpstore_b32 v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpstore_b32 v1, v2, v3 offset:0
+// GFX1250: ds_cmpstore_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+
+ds_cmpstore_b32 v255, v255, v255 offset:4
+// GFX1250: ds_cmpstore_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x40,0xd8,0xff,0xff,0xff,0x00]
+
+ds_cmpstore_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x40,0xd9,0x01,0x02,0x04,0x00]
+
+ds_cmpstore_b64 v1, v[2:3], v[4:5] offset:65535
+// GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0x40,0xd9,0x01,0x02,0x04,0x00]
+
+ds_cmpstore_b64 v1, v[2:3], v[4:5] offset:0
+// GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x40,0xd9,0x01,0x02,0x04,0x00]
+
+ds_cmpstore_b64 v255, v[254:255], v[254:255] offset:4
+// GFX1250: ds_cmpstore_b64 v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0x40,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_cmpstore_rtn_b32 v5, v1, v2, v3
+// GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpstore_rtn_b32 v5, v1, v2, v3 offset:65535
+// GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpstore_rtn_b32 v5, v1, v2, v3 offset:0
+// GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_cmpstore_rtn_b32 v255, v255, v255, v255 offset:4
+// GFX1250: ds_cmpstore_rtn_b32 v255, v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xd8,0xff,0xff,0xff,0xff]
+
+ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5]
+// GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xc0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535
+// GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0xc0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:0
+// GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xc0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_cmpstore_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4
+// GFX1250: ds_cmpstore_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0xc0,0xd9,0xff,0xfe,0xfe,0xfe]
+
+ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xf8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_condxchg32_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_condxchg32_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xf8,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_consume v5
+// GFX1250: ds_consume v5 ; encoding: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v5 offset:65535
+// GFX1250: ds_consume v5 offset:65535 ; encoding: [0xff,0xff,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v5 offset:0
+// GFX1250: ds_consume v5 ; encoding: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+
+ds_consume v255 offset:4
+// GFX1250: ds_consume v255 offset:4 ; encoding: [0x04,0x00,0xf4,0xd8,0x00,0x00,0x00,0xff]
+
+ds_dec_rtn_u32 v5, v1, v2
+// GFX1250: ds_dec_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_dec_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_dec_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+
+ds_dec_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_dec_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xd8,0xff,0xff,0x00,0xff]
+
+ds_dec_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x06]
+
+ds_dec_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x90,0xd9,0x01,0x02,0x00,0x06]
+
+ds_dec_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x06]
+
+ds_dec_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_dec_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x90,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_dec_u32 v1, v2
+// GFX1250: ds_dec_u32 v1, v2 ; encoding: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:65535
+// GFX1250: ds_dec_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v1, v2 offset:0
+// GFX1250: ds_dec_u32 v1, v2 ; encoding: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+
+ds_dec_u32 v255, v255 offset:4
+// GFX1250: ds_dec_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x10,0xd8,0xff,0xff,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3]
+// GFX1250: ds_dec_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_dec_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v1, v[2:3] offset:0
+// GFX1250: ds_dec_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+
+ds_dec_u64 v255, v[254:255] offset:4
+// GFX1250: ds_dec_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x10,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_inc_rtn_u32 v5, v1, v2
+// GFX1250: ds_inc_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_inc_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_inc_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_inc_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_inc_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xd8,0xff,0xff,0x00,0xff]
+
+ds_inc_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_inc_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x8c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_inc_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_inc_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_inc_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x8c,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_inc_u32 v1, v2
+// GFX1250: ds_inc_u32 v1, v2 ; encoding: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:65535
+// GFX1250: ds_inc_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v1, v2 offset:0
+// GFX1250: ds_inc_u32 v1, v2 ; encoding: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_inc_u32 v255, v255 offset:4
+// GFX1250: ds_inc_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x0c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3]
+// GFX1250: ds_inc_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_inc_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v1, v[2:3] offset:0
+// GFX1250: ds_inc_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_inc_u64 v255, v[254:255] offset:4
+// GFX1250: ds_inc_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x0c,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_load_2addr_b32 v[6:7], v1
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b32 v[6:7], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b32 v[6:7], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b32 v[254:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd8,0xff,0x00,0x00,0xfe]
+
+ds_load_2addr_b64 v[6:9], v1
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b64 v[6:9], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b64 v[6:9], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_b64 v[252:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd9,0xff,0x00,0x00,0xfc]
+
+ds_load_2addr_stride64_b32 v[6:7], v1
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b32 v[6:7], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b32 v[6:7], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b32 v[254:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_stride64_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd8,0xff,0x00,0x00,0xfe]
+
+ds_load_2addr_stride64_b64 v[6:9], v1
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b64 v[6:9], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b64 v[6:9], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_2addr_stride64_b64 v[252:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_stride64_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd9,0xff,0x00,0x00,0xfc]
+
+ds_load_addtid_b32 v5
+// GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_load_addtid_b32 v5 offset:65535
+// GFX1250: ds_load_addtid_b32 v5 offset:65535 ; encoding: [0xff,0xff,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_load_addtid_b32 v5 offset:0
+// GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_load_addtid_b32 v255 offset:4
+// GFX1250: ds_load_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc4,0xda,0x00,0x00,0x00,0xff]
+
+ds_load_b128 v[6:9], v1
+// GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b128 v[6:9], v1 offset:65535
+// GFX1250: ds_load_b128 v[6:9], v1 offset:65535 ; encoding: [0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b128 v[6:9], v1 offset:0
+// GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b128 v[252:255], v255 offset:4
+// GFX1250: ds_load_b128 v[252:255], v255 offset:4 ; encoding: [0x04,0x00,0xfc,0xdb,0xff,0x00,0x00,0xfc]
+
+ds_load_b32 v5, v1
+// GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_b32 v5, v1 offset:65535
+// GFX1250: ds_load_b32 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_b32 v5, v1 offset:0
+// GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_b32 v255, v255 offset:4
+// GFX1250: ds_load_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_b64 v[6:7], v1
+// GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_b64 v[6:7], v1 offset:65535
+// GFX1250: ds_load_b64 v[6:7], v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_b64 v[6:7], v1 offset:0
+// GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_load_b64 v[254:255], v255 offset:4
+// GFX1250: ds_load_b64 v[254:255], v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd9,0xff,0x00,0x00,0xfe]
+
+ds_load_b96 v[6:8], v1
+// GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b96 v[6:8], v1 offset:65535
+// GFX1250: ds_load_b96 v[6:8], v1 offset:65535 ; encoding: [0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b96 v[6:8], v1 offset:0
+// GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_load_b96 v[252:254], v255 offset:4
+// GFX1250: ds_load_b96 v[252:254], v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xdb,0xff,0x00,0x00,0xfc]
+
+ds_load_i16 v5, v1
+// GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i16 v5, v1 offset:65535
+// GFX1250: ds_load_i16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i16 v5, v1 offset:0
+// GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i16 v255, v255 offset:4
+// GFX1250: ds_load_i16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xec,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_i8 v5, v1
+// GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i8 v5, v1 offset:65535
+// GFX1250: ds_load_i8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i8 v5, v1 offset:0
+// GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_i8 v255, v255 offset:4
+// GFX1250: ds_load_i8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_i8_d16 v5, v1
+// GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16 v5, v1 offset:65535
+// GFX1250: ds_load_i8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16 v5, v1 offset:0
+// GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16 v255, v255 offset:4
+// GFX1250: ds_load_i8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_i8_d16_hi v5, v1
+// GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_i8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_i8_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_i8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_u16 v5, v1
+// GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u16 v5, v1 offset:65535
+// GFX1250: ds_load_u16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u16 v5, v1 offset:0
+// GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u16 v255, v255 offset:4
+// GFX1250: ds_load_u16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xf0,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_u16_d16 v5, v1
+// GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16 v5, v1 offset:65535
+// GFX1250: ds_load_u16_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16 v5, v1 offset:0
+// GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16 v255, v255 offset:4
+// GFX1250: ds_load_u16_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_u16_d16_hi v5, v1
+// GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_u16_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u16_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_u16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_u8 v5, v1
+// GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u8 v5, v1 offset:65535
+// GFX1250: ds_load_u8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u8 v5, v1 offset:0
+// GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_load_u8 v255, v255 offset:4
+// GFX1250: ds_load_u8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe8,0xd8,0xff,0x00,0x00,0xff]
+
+ds_load_u8_d16 v5, v1
+// GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16 v5, v1 offset:65535
+// GFX1250: ds_load_u8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16 v5, v1 offset:0
+// GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16 v255, v255 offset:4
+// GFX1250: ds_load_u8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xda,0xff,0x00,0x00,0xff]
+
+ds_load_u8_d16_hi v5, v1
+// GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_u8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_load_u8_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_u8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xda,0xff,0x00,0x00,0xff]
+
+ds_max_num_f32 v1, v2
+// GFX1250: ds_max_num_f32 v1, v2 ; encoding: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_num_f32 v1, v2 offset:65535
+// GFX1250: ds_max_num_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_num_f32 v1, v2 offset:0
+// GFX1250: ds_max_num_f32 v1, v2 ; encoding: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_num_f32 v255, v255 offset:4
+// GFX1250: ds_max_num_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x4c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_max_num_f64 v1, v[2:3]
+// GFX1250: ds_max_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_num_f64 v1, v[2:3] offset:65535
+// GFX1250: ds_max_num_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_num_f64 v1, v[2:3] offset:0
+// GFX1250: ds_max_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_num_f64 v255, v[254:255] offset:4
+// GFX1250: ds_max_num_f64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x4c,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_max_i32 v1, v2
+// GFX1250: ds_max_i32 v1, v2 ; encoding: [0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:65535
+// GFX1250: ds_max_i32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v1, v2 offset:0
+// GFX1250: ds_max_i32 v1, v2 ; encoding: [0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_i32 v255, v255 offset:4
+// GFX1250: ds_max_i32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x18,0xd8,0xff,0xff,0x00,0x00]
+
+ds_max_i64 v1, v[2:3]
+// GFX1250: ds_max_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:65535
+// GFX1250: ds_max_i64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v1, v[2:3] offset:0
+// GFX1250: ds_max_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_i64 v255, v[254:255] offset:4
+// GFX1250: ds_max_i64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x18,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_max_num_rtn_f32 v5, v1, v2
+// GFX1250: ds_max_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_num_rtn_f32 v5, v1, v2 offset:65535
+// GFX1250: ds_max_num_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_num_rtn_f32 v5, v1, v2 offset:0
+// GFX1250: ds_max_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_num_rtn_f32 v255, v255, v255 offset:4
+// GFX1250: ds_max_num_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xcc,0xd8,0xff,0xff,0x00,0xff]
+
+ds_max_num_rtn_f64 v[6:7], v1, v[2:3]
+// GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xcc,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_num_rtn_f64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_num_rtn_f64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_max_num_rtn_f64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xcc,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_max_rtn_i32 v5, v1, v2
+// GFX1250: ds_max_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:65535
+// GFX1250: ds_max_rtn_i32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v5, v1, v2 offset:0
+// GFX1250: ds_max_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_i32 v255, v255, v255 offset:4
+// GFX1250: ds_max_rtn_i32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xd8,0xff,0xff,0x00,0xff]
+
+ds_max_rtn_i64 v[6:7], v1, v[2:3]
+// GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_i64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x98,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_i64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_i64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_max_rtn_i64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x98,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_max_rtn_u32 v5, v1, v2
+// GFX1250: ds_max_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_max_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_max_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+
+ds_max_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_max_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa0,0xd8,0xff,0xff,0x00,0xff]
+
+ds_max_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa0,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x06]
+
+ds_max_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_max_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa0,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_max_u32 v1, v2
+// GFX1250: ds_max_u32 v1, v2 ; encoding: [0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:65535
+// GFX1250: ds_max_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v1, v2 offset:0
+// GFX1250: ds_max_u32 v1, v2 ; encoding: [0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+
+ds_max_u32 v255, v255 offset:4
+// GFX1250: ds_max_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x20,0xd8,0xff,0xff,0x00,0x00]
+
+ds_max_u64 v1, v[2:3]
+// GFX1250: ds_max_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_max_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v1, v[2:3] offset:0
+// GFX1250: ds_max_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+
+ds_max_u64 v255, v[254:255] offset:4
+// GFX1250: ds_max_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x20,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_min_num_f32 v1, v2
+// GFX1250: ds_min_num_f32 v1, v2 ; encoding: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_num_f32 v1, v2 offset:65535
+// GFX1250: ds_min_num_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_num_f32 v1, v2 offset:0
+// GFX1250: ds_min_num_f32 v1, v2 ; encoding: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_num_f32 v255, v255 offset:4
+// GFX1250: ds_min_num_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x48,0xd8,0xff,0xff,0x00,0x00]
+
+ds_min_num_f64 v1, v[2:3]
+// GFX1250: ds_min_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_num_f64 v1, v[2:3] offset:65535
+// GFX1250: ds_min_num_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_num_f64 v1, v[2:3] offset:0
+// GFX1250: ds_min_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_num_f64 v255, v[254:255] offset:4
+// GFX1250: ds_min_num_f64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x48,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_min_i32 v1, v2
+// GFX1250: ds_min_i32 v1, v2 ; encoding: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:65535
+// GFX1250: ds_min_i32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v1, v2 offset:0
+// GFX1250: ds_min_i32 v1, v2 ; encoding: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_i32 v255, v255 offset:4
+// GFX1250: ds_min_i32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x14,0xd8,0xff,0xff,0x00,0x00]
+
+ds_min_i64 v1, v[2:3]
+// GFX1250: ds_min_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:65535
+// GFX1250: ds_min_i64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v1, v[2:3] offset:0
+// GFX1250: ds_min_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_i64 v255, v[254:255] offset:4
+// GFX1250: ds_min_i64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x14,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_min_num_rtn_f32 v5, v1, v2
+// GFX1250: ds_min_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_num_rtn_f32 v5, v1, v2 offset:65535
+// GFX1250: ds_min_num_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_num_rtn_f32 v5, v1, v2 offset:0
+// GFX1250: ds_min_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_num_rtn_f32 v255, v255, v255 offset:4
+// GFX1250: ds_min_num_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc8,0xd8,0xff,0xff,0x00,0xff]
+
+ds_min_num_rtn_f64 v[6:7], v1, v[2:3]
+// GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xc8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_num_rtn_f64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_num_rtn_f64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_min_num_rtn_f64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xc8,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_min_rtn_i32 v5, v1, v2
+// GFX1250: ds_min_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:65535
+// GFX1250: ds_min_rtn_i32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v5, v1, v2 offset:0
+// GFX1250: ds_min_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_i32 v255, v255, v255 offset:4
+// GFX1250: ds_min_rtn_i32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xd8,0xff,0xff,0x00,0xff]
+
+ds_min_rtn_i64 v[6:7], v1, v[2:3]
+// GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_i64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x94,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_i64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_i64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_min_rtn_i64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x94,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_min_rtn_u32 v5, v1, v2
+// GFX1250: ds_min_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_min_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_min_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+
+ds_min_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_min_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xd8,0xff,0xff,0x00,0xff]
+
+ds_min_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x9c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x06]
+
+ds_min_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_min_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x9c,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_min_u32 v1, v2
+// GFX1250: ds_min_u32 v1, v2 ; encoding: [0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:65535
+// GFX1250: ds_min_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v1, v2 offset:0
+// GFX1250: ds_min_u32 v1, v2 ; encoding: [0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_min_u32 v255, v255 offset:4
+// GFX1250: ds_min_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x1c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_min_u64 v1, v[2:3]
+// GFX1250: ds_min_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_min_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v1, v[2:3] offset:0
+// GFX1250: ds_min_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_min_u64 v255, v[254:255] offset:4
+// GFX1250: ds_min_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x1c,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_mskor_b32 v1, v2, v3
+// GFX1250: ds_mskor_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:65535
+// GFX1250: ds_mskor_b32 v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v1, v2, v3 offset:0
+// GFX1250: ds_mskor_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+
+ds_mskor_b32 v255, v255, v255 offset:4
+// GFX1250: ds_mskor_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x30,0xd8,0xff,0xff,0xff,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x30,0xd9,0x01,0x02,0x04,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[4:5] offset:65535
+// GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0x30,0xd9,0x01,0x02,0x04,0x00]
+
+ds_mskor_b64 v1, v[2:3], v[4:5] offset:0
+// GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x30,0xd9,0x01,0x02,0x04,0x00]
+
+ds_mskor_b64 v255, v[254:255], v[254:255] offset:4
+// GFX1250: ds_mskor_b64 v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0x30,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3
+// GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535
+// GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v5, v1, v2, v3 offset:0
+// GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+
+ds_mskor_rtn_b32 v255, v255, v255, v255 offset:4
+// GFX1250: ds_mskor_rtn_b32 v255, v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb0,0xd8,0xff,0xff,0xff,0xff]
+
+ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5]
+// GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535
+// GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0xb0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:0
+// GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb0,0xd9,0x01,0x02,0x04,0x06]
+
+ds_mskor_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4
+// GFX1250: ds_mskor_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0xb0,0xd9,0xff,0xfe,0xfe,0xfe]
+
+ds_or_b32 v1, v2
+// GFX1250: ds_or_b32 v1, v2 ; encoding: [0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:65535
+// GFX1250: ds_or_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v1, v2 offset:0
+// GFX1250: ds_or_b32 v1, v2 ; encoding: [0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+
+ds_or_b32 v255, v255 offset:4
+// GFX1250: ds_or_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x28,0xd8,0xff,0xff,0x00,0x00]
+
+ds_or_b64 v1, v[2:3]
+// GFX1250: ds_or_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_or_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v1, v[2:3] offset:0
+// GFX1250: ds_or_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+
+ds_or_b64 v255, v[254:255] offset:4
+// GFX1250: ds_or_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x28,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_or_rtn_b32 v5, v1, v2
+// GFX1250: ds_or_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_or_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_or_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+
+ds_or_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_or_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa8,0xd8,0xff,0xff,0x00,0xff]
+
+ds_or_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_or_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_or_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x06]
+
+ds_or_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_or_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa8,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_permute_b32 v5, v1, v2
+// GFX1250: ds_permute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xda,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_permute_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xc8,0xda,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v5, v1, v2 offset:0
+// GFX1250: ds_permute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xda,0x01,0x02,0x00,0x05]
+
+ds_permute_b32 v255, v255, v255 offset:4
+// GFX1250: ds_permute_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc8,0xda,0xff,0xff,0x00,0xff]
+
+ds_pk_add_f16 v2, v1
+// GFX1250: ds_pk_add_f16 v2, v1 ; encoding: [0x00,0x00,0x68,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_f16 v2, v1 offset:0
+// GFX1250: ds_pk_add_f16 v2, v1 ; encoding: [0x00,0x00,0x68,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_f16 v2, v1 offset:4660
+// GFX1250: ds_pk_add_f16 v2, v1 offset:4660 ; encoding: [0x34,0x12,0x68,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_f16 v2, v1 offset:65535
+// GFX1250: ds_pk_add_f16 v2, v1 offset:65535 ; encoding: [0xff,0xff,0x68,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_f16 v255, v255
+// GFX1250: ds_pk_add_f16 v255, v255 ; encoding: [0x00,0x00,0x68,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_f16 v255, v255 offset:0
+// GFX1250: ds_pk_add_f16 v255, v255 ; encoding: [0x00,0x00,0x68,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_f16 v255, v255 offset:4660
+// GFX1250: ds_pk_add_f16 v255, v255 offset:4660 ; encoding: [0x34,0x12,0x68,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_f16 v255, v255 offset:65535
+// GFX1250: ds_pk_add_f16 v255, v255 offset:65535 ; encoding: [0xff,0xff,0x68,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_f16 v0, v0
+// GFX1250: ds_pk_add_f16 v0, v0 ; encoding: [0x00,0x00,0x68,0xda,0x00,0x00,0x00,0x00]
+
+ds_pk_add_bf16 v2, v1
+// GFX1250: ds_pk_add_bf16 v2, v1 ; encoding: [0x00,0x00,0x6c,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_bf16 v2, v1 offset:0
+// GFX1250: ds_pk_add_bf16 v2, v1 ; encoding: [0x00,0x00,0x6c,0xda,0x02,0x01,0x00,0x00]
+
+ds_pk_add_bf16 v255, v255
+// GFX1250: ds_pk_add_bf16 v255, v255 ; encoding: [0x00,0x00,0x6c,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_bf16 v255, v255 offset:4660
+// GFX1250: ds_pk_add_bf16 v255, v255 offset:4660 ; encoding: [0x34,0x12,0x6c,0xda,0xff,0xff,0x00,0x00]
+
+ds_pk_add_bf16 v0, v0
+// GFX1250: ds_pk_add_bf16 v0, v0 ; encoding: [0x00,0x00,0x6c,0xda,0x00,0x00,0x00,0x00]
+
+ds_pk_add_bf16 v0, v0 offset:65535
+// GFX1250: ds_pk_add_bf16 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x6c,0xda,0x00,0x00,0x00,0x00]
+
+ds_pk_add_rtn_f16 v3, v2, v1
+// GFX1250: ds_pk_add_rtn_f16 v3, v2, v1 ; encoding: [0x00,0x00,0xa8,0xda,0x02,0x01,0x00,0x03]
+
+ds_pk_add_rtn_f16 v3, v2, v1 offset:4660
+// GFX1250: ds_pk_add_rtn_f16 v3, v2, v1 offset:4660 ; encoding: [0x34,0x12,0xa8,0xda,0x02,0x01,0x00,0x03]
+
+ds_pk_add_rtn_f16 v255, v0, v200
+// GFX1250: ds_pk_add_rtn_f16 v255, v0, v200 ; encoding: [0x00,0x00,0xa8,0xda,0x00,0xc8,0x00,0xff]
+
+ds_pk_add_rtn_f16 v255, v0, v200 offset:65535
+// GFX1250: ds_pk_add_rtn_f16 v255, v0, v200 offset:65535 ; encoding: [0xff,0xff,0xa8,0xda,0x00,0xc8,0x00,0xff]
+
+ds_pk_add_rtn_f16 v255, v255, v255
+// GFX1250: ds_pk_add_rtn_f16 v255, v255, v255 ; encoding: [0x00,0x00,0xa8,0xda,0xff,0xff,0x00,0xff]
+
+ds_pk_add_rtn_bf16 v3, v2, v1
+// GFX1250: ds_pk_add_rtn_bf16 v3, v2, v1 ; encoding: [0x00,0x00,0xac,0xda,0x02,0x01,0x00,0x03]
+
+ds_pk_add_rtn_bf16 v3, v2, v1 offset:4660
+// GFX1250: ds_pk_add_rtn_bf16 v3, v2, v1 offset:4660 ; encoding: [0x34,0x12,0xac,0xda,0x02,0x01,0x00,0x03]
+
+ds_pk_add_rtn_bf16 v255, v0, v200
+// GFX1250: ds_pk_add_rtn_bf16 v255, v0, v200 ; encoding: [0x00,0x00,0xac,0xda,0x00,0xc8,0x00,0xff]
+
+ds_pk_add_rtn_bf16 v255, v255, v255
+// GFX1250: ds_pk_add_rtn_bf16 v255, v255, v255 ; encoding: [0x00,0x00,0xac,0xda,0xff,0xff,0x00,0xff]
+
+ds_pk_add_rtn_bf16 v255, v255, v255 offset:65535
+// GFX1250: ds_pk_add_rtn_bf16 v255, v255, v255 offset:65535 ; encoding: [0xff,0xff,0xac,0xda,0xff,0xff,0x00,0xff]
+
+ds_read2_b32 v[6:7], v1
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2_b32 v[6:7], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2_b32 v[6:7], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2_b32 v[254:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd8,0xff,0x00,0x00,0xfe]
+
+ds_read2_b64 v[6:9], v1
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2_b64 v[6:9], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2_b64 v[6:9], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2_b64 v[252:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd9,0xff,0x00,0x00,0xfc]
+
+ds_read2st64_b32 v[6:7], v1
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b32 v[6:7], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b32 v[6:7], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b32 v[254:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_stride64_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd8,0xff,0x00,0x00,0xfe]
+
+ds_read2st64_b64 v[6:9], v1
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b64 v[6:9], v1 offset0:127 offset1:255
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b64 v[6:9], v1 offset0:0 offset1:0
+// GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read2st64_b64 v[252:255], v255 offset0:16 offset1:1
+// GFX1250: ds_load_2addr_stride64_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd9,0xff,0x00,0x00,0xfc]
+
+ds_read_addtid_b32 v5
+// GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_read_addtid_b32 v5 offset:65535
+// GFX1250: ds_load_addtid_b32 v5 offset:65535 ; encoding: [0xff,0xff,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_read_addtid_b32 v5 offset:0
+// GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+
+ds_read_addtid_b32 v255 offset:4
+// GFX1250: ds_load_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc4,0xda,0x00,0x00,0x00,0xff]
+
+ds_read_b128 v[6:9], v1
+// GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b128 v[6:9], v1 offset:65535
+// GFX1250: ds_load_b128 v[6:9], v1 offset:65535 ; encoding: [0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b128 v[6:9], v1 offset:0
+// GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b128 v[252:255], v255 offset:4
+// GFX1250: ds_load_b128 v[252:255], v255 offset:4 ; encoding: [0x04,0x00,0xfc,0xdb,0xff,0x00,0x00,0xfc]
+
+ds_read_b32 v5, v1
+// GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:65535
+// GFX1250: ds_load_b32 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v5, v1 offset:0
+// GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_b32 v255, v255 offset:4
+// GFX1250: ds_load_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_b64 v[6:7], v1
+// GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read_b64 v[6:7], v1 offset:65535
+// GFX1250: ds_load_b64 v[6:7], v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read_b64 v[6:7], v1 offset:0
+// GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+
+ds_read_b64 v[254:255], v255 offset:4
+// GFX1250: ds_load_b64 v[254:255], v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd9,0xff,0x00,0x00,0xfe]
+
+ds_read_b96 v[6:8], v1
+// GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b96 v[6:8], v1 offset:65535
+// GFX1250: ds_load_b96 v[6:8], v1 offset:65535 ; encoding: [0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b96 v[6:8], v1 offset:0
+// GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+
+ds_read_b96 v[252:254], v255 offset:4
+// GFX1250: ds_load_b96 v[252:254], v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xdb,0xff,0x00,0x00,0xfc]
+
+ds_read_i16 v5, v1
+// GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:65535
+// GFX1250: ds_load_i16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v5, v1 offset:0
+// GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i16 v255, v255 offset:4
+// GFX1250: ds_load_i16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xec,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_i8 v5, v1
+// GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:65535
+// GFX1250: ds_load_i8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v5, v1 offset:0
+// GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_i8 v255, v255 offset:4
+// GFX1250: ds_load_i8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_i8_d16 v5, v1
+// GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16 v5, v1 offset:65535
+// GFX1250: ds_load_i8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16 v5, v1 offset:0
+// GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16 v255, v255 offset:4
+// GFX1250: ds_load_i8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_i8_d16_hi v5, v1
+// GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_i8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_i8_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_i8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_u16 v5, v1
+// GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:65535
+// GFX1250: ds_load_u16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v5, v1 offset:0
+// GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u16 v255, v255 offset:4
+// GFX1250: ds_load_u16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xf0,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_u16_d16 v5, v1
+// GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16 v5, v1 offset:65535
+// GFX1250: ds_load_u16_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16 v5, v1 offset:0
+// GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16 v255, v255 offset:4
+// GFX1250: ds_load_u16_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_u16_d16_hi v5, v1
+// GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_u16_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u16_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_u16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_u8 v5, v1
+// GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:65535
+// GFX1250: ds_load_u8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v5, v1 offset:0
+// GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+
+ds_read_u8 v255, v255 offset:4
+// GFX1250: ds_load_u8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe8,0xd8,0xff,0x00,0x00,0xff]
+
+ds_read_u8_d16 v5, v1
+// GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16 v5, v1 offset:65535
+// GFX1250: ds_load_u8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16 v5, v1 offset:0
+// GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16 v255, v255 offset:4
+// GFX1250: ds_load_u8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xda,0xff,0x00,0x00,0xff]
+
+ds_read_u8_d16_hi v5, v1
+// GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16_hi v5, v1 offset:65535
+// GFX1250: ds_load_u8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16_hi v5, v1 offset:0
+// GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+
+ds_read_u8_d16_hi v255, v255 offset:4
+// GFX1250: ds_load_u8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xda,0xff,0x00,0x00,0xff]
+
+ds_rsub_rtn_u32 v5, v1, v2
+// GFX1250: ds_rsub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_rsub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_rsub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+
+ds_rsub_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_rsub_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xd8,0xff,0xff,0x00,0xff]
+
+ds_rsub_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x06]
+
+ds_rsub_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x88,0xd9,0x01,0x02,0x00,0x06]
+
+ds_rsub_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x06]
+
+ds_rsub_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_rsub_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x88,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_rsub_u32 v1, v2
+// GFX1250: ds_rsub_u32 v1, v2 ; encoding: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:65535
+// GFX1250: ds_rsub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v1, v2 offset:0
+// GFX1250: ds_rsub_u32 v1, v2 ; encoding: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+
+ds_rsub_u32 v255, v255 offset:4
+// GFX1250: ds_rsub_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x08,0xd8,0xff,0xff,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3]
+// GFX1250: ds_rsub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_rsub_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v1, v[2:3] offset:0
+// GFX1250: ds_rsub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+
+ds_rsub_u64 v255, v[254:255] offset:4
+// GFX1250: ds_rsub_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x08,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_store_2addr_b32 v1, v2, v3
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_b32 v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_b32 v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_b32 v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_store_2addr_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd8,0xff,0xff,0xff,0x00]
+
+ds_store_2addr_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_store_2addr_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_store_2addr_stride64_b32 v1, v2, v3
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_stride64_b32 v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_stride64_b32 v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_store_2addr_stride64_b32 v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_store_2addr_stride64_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd8,0xff,0xff,0xff,0x00]
+
+ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_store_2addr_stride64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_store_2addr_stride64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_store_addtid_b32 v1
+// GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_store_addtid_b32 v1 offset:65535
+// GFX1250: ds_store_addtid_b32 v1 offset:65535 ; encoding: [0xff,0xff,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_store_addtid_b32 v1 offset:0
+// GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_store_addtid_b32 v255 offset:4
+// GFX1250: ds_store_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xda,0x00,0xff,0x00,0x00]
+
+ds_store_b128 v1, v[2:5]
+// GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b128 v1, v[2:5] offset:65535
+// GFX1250: ds_store_b128 v1, v[2:5] offset:65535 ; encoding: [0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b128 v1, v[2:5] offset:0
+// GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b128 v255, v[252:255] offset:4
+// GFX1250: ds_store_b128 v255, v[252:255] offset:4 ; encoding: [0x04,0x00,0x7c,0xdb,0xff,0xfc,0x00,0x00]
+
+ds_store_b16 v1, v2
+// GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b16 v1, v2 offset:65535
+// GFX1250: ds_store_b16 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b16 v1, v2 offset:0
+// GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b16 v255, v255 offset:4
+// GFX1250: ds_store_b16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x7c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_store_b16_d16_hi v1, v2
+// GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b16_d16_hi v1, v2 offset:65535
+// GFX1250: ds_store_b16_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b16_d16_hi v1, v2 offset:0
+// GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b16_d16_hi v255, v255 offset:4
+// GFX1250: ds_store_b16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xda,0xff,0xff,0x00,0x00]
+
+ds_store_b32 v1, v2
+// GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b32 v1, v2 offset:65535
+// GFX1250: ds_store_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b32 v1, v2 offset:0
+// GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b32 v255, v255 offset:4
+// GFX1250: ds_store_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x34,0xd8,0xff,0xff,0x00,0x00]
+
+ds_store_b64 v1, v[2:3]
+// GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_store_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_store_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_store_b64 v1, v[2:3] offset:0
+// GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_store_b64 v255, v[254:255] offset:4
+// GFX1250: ds_store_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x34,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_store_b8 v1, v2
+// GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b8 v1, v2 offset:65535
+// GFX1250: ds_store_b8 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b8 v1, v2 offset:0
+// GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_store_b8 v255, v255 offset:4
+// GFX1250: ds_store_b8 v255, v255 offset:4 ; encoding: [0x04,0x00,0x78,0xd8,0xff,0xff,0x00,0x00]
+
+ds_store_b8_d16_hi v1, v2
+// GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b8_d16_hi v1, v2 offset:65535
+// GFX1250: ds_store_b8_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b8_d16_hi v1, v2 offset:0
+// GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_store_b8_d16_hi v255, v255 offset:4
+// GFX1250: ds_store_b8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xda,0xff,0xff,0x00,0x00]
+
+ds_store_b96 v1, v[2:4]
+// GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b96 v1, v[2:4] offset:65535
+// GFX1250: ds_store_b96 v1, v[2:4] offset:65535 ; encoding: [0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b96 v1, v[2:4] offset:0
+// GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_store_b96 v255, v[252:254] offset:4
+// GFX1250: ds_store_b96 v255, v[252:254] offset:4 ; encoding: [0x04,0x00,0x78,0xdb,0xff,0xfc,0x00,0x00]
+
+ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd8,0xff,0xff,0xff,0xfe]
+
+ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5]
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd9,0xff,0xfe,0xfe,0xfc]
+
+ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd8,0xff,0xff,0xff,0xfe]
+
+ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5]
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_storexchg_2addr_stride64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd9,0xff,0xfe,0xfe,0xfc]
+
+ds_storexchg_rtn_b32 v5, v1, v2
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_storexchg_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_storexchg_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_storexchg_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_storexchg_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb4,0xd8,0xff,0xff,0x00,0xff]
+
+ds_storexchg_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_storexchg_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_storexchg_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xb4,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_cond_sub_rtn_u32 v5, v1, v2
+// GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xda,0x01,0x02,0x00,0x05]
+
+ds_cond_sub_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa0,0xda,0x01,0x02,0x00,0x05]
+
+ds_cond_sub_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xda,0x01,0x02,0x00,0x05]
+
+ds_cond_sub_u32 v1, v2
+// GFX1250: ds_cond_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x60,0xda,0x01,0x02,0x00,0x00]
+
+ds_cond_sub_u32 v1, v2 offset:65535
+// GFX1250: ds_cond_sub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x60,0xda,0x01,0x02,0x00,0x00]
+
+ds_cond_sub_u32 v1, v2 offset:0
+// GFX1250: ds_cond_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x60,0xda,0x01,0x02,0x00,0x00]
+
+ds_sub_clamp_rtn_u32 v5, v1, v2
+// GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xda,0x01,0x02,0x00,0x05]
+
+ds_sub_clamp_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa4,0xda,0x01,0x02,0x00,0x05]
+
+ds_sub_clamp_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xda,0x01,0x02,0x00,0x05]
+
+ds_sub_clamp_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_sub_clamp_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa4,0xda,0xff,0xff,0x00,0xff]
+
+ds_sub_clamp_u32 v1, v2
+// GFX1250: ds_sub_clamp_u32 v1, v2 ; encoding: [0x00,0x00,0x64,0xda,0x01,0x02,0x00,0x00]
+
+ds_sub_clamp_u32 v1, v2 offset:65535
+// GFX1250: ds_sub_clamp_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x64,0xda,0x01,0x02,0x00,0x00]
+
+ds_sub_clamp_u32 v1, v2 offset:0
+// GFX1250: ds_sub_clamp_u32 v1, v2 ; encoding: [0x00,0x00,0x64,0xda,0x01,0x02,0x00,0x00]
+
+ds_sub_clamp_u32 v255, v255 offset:4
+// GFX1250: ds_sub_clamp_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x64,0xda,0xff,0xff,0x00,0x00]
+
+ds_sub_rtn_u32 v5, v1, v2
+// GFX1250: ds_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:65535
+// GFX1250: ds_sub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v5, v1, v2 offset:0
+// GFX1250: ds_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+
+ds_sub_rtn_u32 v255, v255, v255 offset:4
+// GFX1250: ds_sub_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xd8,0xff,0xff,0x00,0xff]
+
+ds_sub_rtn_u64 v[6:7], v1, v[2:3]
+// GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x06]
+
+ds_sub_rtn_u64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x84,0xd9,0x01,0x02,0x00,0x06]
+
+ds_sub_rtn_u64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x06]
+
+ds_sub_rtn_u64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_sub_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x84,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_sub_u32 v1, v2
+// GFX1250: ds_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:65535
+// GFX1250: ds_sub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v1, v2 offset:0
+// GFX1250: ds_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+
+ds_sub_u32 v255, v255 offset:4
+// GFX1250: ds_sub_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x04,0xd8,0xff,0xff,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3]
+// GFX1250: ds_sub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:65535
+// GFX1250: ds_sub_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v1, v[2:3] offset:0
+// GFX1250: ds_sub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+
+ds_sub_u64 v255, v[254:255] offset:4
+// GFX1250: ds_sub_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x04,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_write2_b32 v1, v2, v3
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2_b32 v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_store_2addr_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd8,0xff,0xff,0xff,0x00]
+
+ds_write2_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2_b64 v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_store_2addr_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_write2st64_b32 v1, v2, v3
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+
+ds_write2st64_b32 v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_store_2addr_stride64_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd8,0xff,0xff,0xff,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[4:5]
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2st64_b64 v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+
+ds_write2st64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_store_2addr_stride64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd9,0xff,0xfe,0xfe,0x00]
+
+ds_write_addtid_b32 v1
+// GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_write_addtid_b32 v1 offset:65535
+// GFX1250: ds_store_addtid_b32 v1 offset:65535 ; encoding: [0xff,0xff,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_write_addtid_b32 v1 offset:0
+// GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+
+ds_write_addtid_b32 v255 offset:4
+// GFX1250: ds_store_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xda,0x00,0xff,0x00,0x00]
+
+ds_write_b128 v1, v[2:5]
+// GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b128 v1, v[2:5] offset:65535
+// GFX1250: ds_store_b128 v1, v[2:5] offset:65535 ; encoding: [0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b128 v1, v[2:5] offset:0
+// GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b128 v255, v[252:255] offset:4
+// GFX1250: ds_store_b128 v255, v[252:255] offset:4 ; encoding: [0x04,0x00,0x7c,0xdb,0xff,0xfc,0x00,0x00]
+
+ds_write_b16 v1, v2
+// GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:65535
+// GFX1250: ds_store_b16 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v1, v2 offset:0
+// GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b16 v255, v255 offset:4
+// GFX1250: ds_store_b16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x7c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_write_b16_d16_hi v1, v2
+// GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b16_d16_hi v1, v2 offset:65535
+// GFX1250: ds_store_b16_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b16_d16_hi v1, v2 offset:0
+// GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b16_d16_hi v255, v255 offset:4
+// GFX1250: ds_store_b16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xda,0xff,0xff,0x00,0x00]
+
+ds_write_b32 v1, v2
+// GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:65535
+// GFX1250: ds_store_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v1, v2 offset:0
+// GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b32 v255, v255 offset:4
+// GFX1250: ds_store_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x34,0xd8,0xff,0xff,0x00,0x00]
+
+ds_write_b64 v1, v[2:3]
+// GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_store_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v1, v[2:3] offset:0
+// GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+
+ds_write_b64 v255, v[254:255] offset:4
+// GFX1250: ds_store_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x34,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_write_b8 v1, v2
+// GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:65535
+// GFX1250: ds_store_b8 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v1, v2 offset:0
+// GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+
+ds_write_b8 v255, v255 offset:4
+// GFX1250: ds_store_b8 v255, v255 offset:4 ; encoding: [0x04,0x00,0x78,0xd8,0xff,0xff,0x00,0x00]
+
+ds_write_b8_d16_hi v1, v2
+// GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b8_d16_hi v1, v2 offset:65535
+// GFX1250: ds_store_b8_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b8_d16_hi v1, v2 offset:0
+// GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+
+ds_write_b8_d16_hi v255, v255 offset:4
+// GFX1250: ds_store_b8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xda,0xff,0xff,0x00,0x00]
+
+ds_write_b96 v1, v[2:4]
+// GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b96 v1, v[2:4] offset:65535
+// GFX1250: ds_store_b96 v1, v[2:4] offset:65535 ; encoding: [0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b96 v1, v[2:4] offset:0
+// GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+
+ds_write_b96 v255, v[252:254] offset:4
+// GFX1250: ds_store_b96 v255, v[252:254] offset:4 ; encoding: [0x04,0x00,0x78,0xdb,0xff,0xfc,0x00,0x00]
+
+ds_wrxchg2_rtn_b32 v[6:7], v1, v2, v3
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2_rtn_b32 v[6:7], v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd8,0xff,0xff,0xff,0xfe]
+
+ds_wrxchg2_rtn_b64 v[6:9], v1, v[2:3], v[4:5]
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd9,0xff,0xfe,0xfe,0xfc]
+
+ds_wrxchg2st64_rtn_b32 v[6:7], v1, v2, v3
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2st64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2st64_rtn_b32 v[6:7], v1, v2, v3 offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+
+ds_wrxchg2st64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd8,0xff,0xff,0xff,0xfe]
+
+ds_wrxchg2st64_rtn_b64 v[6:9], v1, v[2:3], v[4:5]
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2st64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2st64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:0 offset1:0
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+
+ds_wrxchg2st64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1
+// GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd9,0xff,0xfe,0xfe,0xfc]
+
+ds_wrxchg_rtn_b32 v5, v1, v2
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+
+ds_wrxchg_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_storexchg_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb4,0xd8,0xff,0xff,0x00,0xff]
+
+ds_wrxchg_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_wrxchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_wrxchg_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+
+ds_wrxchg_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_storexchg_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xb4,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_xor_b32 v1, v2
+// GFX1250: ds_xor_b32 v1, v2 ; encoding: [0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:65535
+// GFX1250: ds_xor_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v1, v2 offset:0
+// GFX1250: ds_xor_b32 v1, v2 ; encoding: [0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+
+ds_xor_b32 v255, v255 offset:4
+// GFX1250: ds_xor_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x2c,0xd8,0xff,0xff,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3]
+// GFX1250: ds_xor_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:65535
+// GFX1250: ds_xor_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v1, v[2:3] offset:0
+// GFX1250: ds_xor_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+
+ds_xor_b64 v255, v[254:255] offset:4
+// GFX1250: ds_xor_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x2c,0xd9,0xff,0xfe,0x00,0x00]
+
+ds_xor_rtn_b32 v5, v1, v2
+// GFX1250: ds_xor_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:65535
+// GFX1250: ds_xor_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v5, v1, v2 offset:0
+// GFX1250: ds_xor_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+
+ds_xor_rtn_b32 v255, v255, v255 offset:4
+// GFX1250: ds_xor_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xac,0xd8,0xff,0xff,0x00,0xff]
+
+ds_xor_rtn_b64 v[6:7], v1, v[2:3]
+// GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x06]
+
+ds_xor_rtn_b64 v[6:7], v1, v[2:3] offset:65535
+// GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xac,0xd9,0x01,0x02,0x00,0x06]
+
+ds_xor_rtn_b64 v[6:7], v1, v[2:3] offset:0
+// GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x06]
+
+ds_xor_rtn_b64 v[254:255], v255, v[254:255] offset:4
+// GFX1250: ds_xor_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xac,0xd9,0xff,0xfe,0x00,0xfe]
+
+ds_swizzle_b32 v8, v2
+// GFX1250: ds_swizzle_b32 v8, v2 ; encoding: [0x00,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:0
+// GFX1250: ds_swizzle_b32 v8, v2 ; encoding: [0x00,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:0xFFFF
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(FFT,31) ; encoding: [0xff,0xff,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(QUAD_PERM, 0, 1, 2, 3)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(QUAD_PERM,0,1,2,3) ; encoding: [0xe4,0x80,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(SWAP,16)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(SWAP,16) ; encoding: [0x1f,0x40,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(REVERSE,8)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(REVERSE,8) ; encoding: [0x1f,0x1c,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,4,1)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,4,1) ; encoding: [0x3c,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,8,7)
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,8,7) ; encoding: [0xf8,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
+ds_swizzle_b32 v8, v2 offset:swizzle(BITMASK_PERM, "01pip")
+// GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BITMASK_PERM,"01pip") ; encoding: [0x07,0x09,0xd4,0xd8,0x02,0x00,0x00,0x08]
+
ds_atomic_async_barrier_arrive_b64 v1 offset:65407
// GFX1250: ds_atomic_async_barrier_arrive_b64 v1 offset:65407 ; encoding: [0x7f,0xff,0x58,0xd9,0x01,0x00,0x00,0x00]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
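The ds_swizzle_b32 checks above pin down how the offset immediate decomposes: the QUAD_PERM form sets bit 15 with the four 2-bit lane selects in the low byte, the SWAP/REVERSE/BROADCAST forms are printed views of the and/or/xor bitmask encoding, and some high-bit patterns (e.g. 0xFFFF) print as FFT. A minimal sketch in Python, using hypothetical helper names and assuming that layout, reproduces the byte values asserted above:

def quad_perm(s0, s1, s2, s3):
    # QUAD_PERM: bit 15 set, four 2-bit lane selects in bits [7:0]
    return 0x8000 | s0 | (s1 << 2) | (s2 << 4) | (s3 << 6)

def bitmask_perm(and_mask, or_mask=0, xor_mask=0):
    # bitmask encoding: and_mask[4:0] | or_mask[9:5] | xor_mask[14:10]
    return and_mask | (or_mask << 5) | (xor_mask << 10)

assert quad_perm(0, 1, 2, 3) == 0x80E4          # encoding bytes [0xe4,0x80]
assert bitmask_perm(31, xor_mask=16) == 0x401F  # swizzle(SWAP,16)
assert bitmask_perm(31, xor_mask=7) == 0x1C1F   # swizzle(REVERSE,8)
assert bitmask_perm(28, or_mask=1) == 0x003C    # swizzle(BROADCAST,4,1)
assert bitmask_perm(24, or_mask=7) == 0x00F8    # swizzle(BROADCAST,8,7)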
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_features.s b/llvm/test/MC/AMDGPU/gfx1250_asm_features.s
new file mode 100644
index 0000000..013b790
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_features.s
@@ -0,0 +1,32 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s | FileCheck --check-prefixes=GFX1250 %s
+
+//
+// Elements of CPol operand can be given in any order
+//
+
+s_load_b32 s4, s[2:3], 10 th:TH_LOAD_NT scope:SCOPE_SE nv
+// GFX1250: encoding: [0x01,0x01,0xb0,0xf4,0x0a,0x00,0x00,0xf8]
+
+s_load_b32 s4, s[2:3], 10 scope:SCOPE_SE nv th:TH_LOAD_NT
+// GFX1250: encoding: [0x01,0x01,0xb0,0xf4,0x0a,0x00,0x00,0xf8]
+
+s_load_b32 s4, s[2:3], 10 nv scope:SCOPE_SE th:TH_LOAD_NT
+// GFX1250: encoding: [0x01,0x01,0xb0,0xf4,0x0a,0x00,0x00,0xf8]
+
+buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 th:TH_LOAD_NT scope:SCOPE_SE nv
+// GFX1250: encoding: [0x83,0x00,0x05,0xc4,0x05,0x10,0x94,0x40,0x01,0xff,0x0f,0x00]
+
+buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 scope:SCOPE_SE nv th:TH_LOAD_NT
+// GFX1250: encoding: [0x83,0x00,0x05,0xc4,0x05,0x10,0x94,0x40,0x01,0xff,0x0f,0x00]
+
+buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 nv scope:SCOPE_SE th:TH_LOAD_NT
+// GFX1250: encoding: [0x83,0x00,0x05,0xc4,0x05,0x10,0x94,0x40,0x01,0xff,0x0f,0x00]
+
+global_load_b32 v0, v[2:3], off th:TH_LOAD_NT scope:SCOPE_SE nv
+// GFX1250: encoding: [0xfc,0x00,0x05,0xee,0x00,0x00,0x14,0x00,0x02,0x00,0x00,0x00]
+
+global_load_b32 v0, v[2:3], off scope:SCOPE_SE nv th:TH_LOAD_NT
+// GFX1250: encoding: [0xfc,0x00,0x05,0xee,0x00,0x00,0x14,0x00,0x02,0x00,0x00,0x00]
+
+global_load_b32 v0, v[2:3], off nv scope:SCOPE_SE th:TH_LOAD_NT
+// GFX1250: encoding: [0xfc,0x00,0x05,0xee,0x00,0x00,0x14,0x00,0x02,0x00,0x00,0x00]
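The point of gfx1250_asm_features.s is that the CPol modifiers (th:, scope:, nv) are accepted in any order and assemble to the same bytes. A throwaway sketch of how one might spot-check that outside lit, assuming an llvm-mc with the AMDGPU target is on PATH (the flags mirror the RUN line above):

import itertools
import subprocess

def encode(line):
    # Assemble one line and return the printed encoding byte list.
    out = subprocess.run(
        ["llvm-mc", "-triple=amdgcn", "-mcpu=gfx1250", "-show-encoding"],
        input=line + "\n", capture_output=True, text=True, check=True).stdout
    return out.split("encoding:")[1].strip()

mods = ["th:TH_LOAD_NT", "scope:SCOPE_SE", "nv"]
encodings = {encode("s_load_b32 s4, s[2:3], 10 " + " ".join(p))
             for p in itertools.permutations(mods)}
assert len(encodings) == 1  # all six orderings assemble to one encoding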
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_operands.s b/llvm/test/MC/AMDGPU/gfx1250_asm_operands.s
index 8b7465b..100fc98 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_operands.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_operands.s
@@ -27,3 +27,28 @@ s_mov_b64 s[0:1], src_shared_limit
s_getreg_b32 s1, hwreg(33)
// GFX1250: encoding: [0x21,0xf8,0x81,0xb8]
+
+s_getreg_b32 s1, hwreg(HW_REG_XNACK_STATE_PRIV)
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX1250: encoding: [0x21,0xf8,0x81,0xb8]
+
+s_getreg_b32 s1, hwreg(34)
+// GFX1250: encoding: [0x22,0xf8,0x81,0xb8]
+
+s_getreg_b32 s1, hwreg(HW_REG_XNACK_MASK)
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX1250: encoding: [0x22,0xf8,0x81,0xb8]
+
+s_setreg_b32 hwreg(33), s1
+// GFX1250: encoding: [0x21,0xf8,0x01,0xb9]
+
+s_setreg_b32 hwreg(HW_REG_XNACK_STATE_PRIV), s1
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX1250: encoding: [0x21,0xf8,0x01,0xb9]
+
+s_setreg_b32 hwreg(34), s1
+// GFX1250: encoding: [0x22,0xf8,0x01,0xb9]
+
+s_setreg_b32 hwreg(HW_REG_XNACK_MASK), s1
+// GFX1200-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid hardware register: not supported on this GPU
+// GFX1250: encoding: [0x22,0xf8,0x01,0xb9]
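The low two bytes of the s_getreg_b32/s_setreg_b32 encodings above are the SOPK simm16 operand: 0xF821 for hwreg(33) and 0xF822 for hwreg(34), consistent with the usual id[5:0] | offset[10:6] | (size-1)[15:11] packing at the defaults offset=0, size=32. A minimal sketch of that packing (assumed layout, not an LLVM API):

def hwreg_simm16(reg_id, offset=0, size=32):
    # Assumed packing: id in bits [5:0], offset in [10:6], size-1 in [15:11]
    assert 0 <= reg_id < 64 and 0 <= offset < 32 and 1 <= size <= 32
    return reg_id | (offset << 6) | ((size - 1) << 11)

assert hwreg_simm16(33) == 0xF821  # bytes [0x21,0xf8,...] above
assert hwreg_simm16(34) == 0xF822  # bytes [0x22,0xf8,...] above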
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s
index 41b6e93..aab8d9a 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_sop1.s
@@ -45,6 +45,10 @@ s_rfe_i64 s[2:3]
s_rfe_b64 s[2:3]
// GFX1250: s_rfe_i64 s[2:3] ; encoding: [0x02,0x4a,0x80,0xbe]
+s_get_shader_cycles_u64 s[2:3]
+// GFX1250: s_get_shader_cycles_u64 s[2:3] ; encoding: [0x00,0x06,0x82,0xbe]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
s_barrier_signal -3
// GFX1250: s_barrier_signal -3 ; encoding: [0xc3,0x4e,0x80,0xbe]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s b/llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s
index 89bd507..7681a32 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_unsupported.s
@@ -97,6 +97,20 @@ v_interp_p10_rtz_f16_f32 v0, v1, v2, v3
v_interp_p2_rtz_f16_f32 v0, v1, v2, v3
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+;; *xf32
+
+v_mfma_f32_16x16x8_xf32 a[0:3], v[2:3], v[4:5], a[2:5]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_mfma_f32_16x16x8xf32 a[0:3], v[2:3], v[4:5], a[2:5]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_mfma_f32_32x32x4_xf32 a[0:15], v[2:3], v[4:5], a[18:33]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_mfma_f32_32x32x4xf32 a[0:15], v[2:3], v[4:5], a[18:33]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
;; Export, S_WAIT_EXPCNT and S_WAIT_EVENT
export mrt0 off, off, off, off
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
index 7a4da25..0b8f190 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vbuffer_mubuf.s
@@ -1,6 +1,2310 @@
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s | FileCheck --check-prefix=GFX1250 %s
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+buffer_load_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
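One pattern worth noting across the buffer_load checks above and below: the 24-bit offset: immediate occupies the last three bytes of the 12-byte VBUFFER encoding, little-endian, and offset:0 round-trips with the operand omitted. A minimal sketch of just that packing (an observation about these check lines, not an encoder):

def vbuffer_offset_bytes(offset):
    # 24-bit immediate, little-endian, in the final three encoding bytes
    assert 0 <= offset <= 0x7FFFFF  # 8388607 is the largest value used here
    return [offset & 0xFF, (offset >> 8) & 0xFF, (offset >> 16) & 0xFF]

assert vbuffer_offset_bytes(8388607) == [0xFF, 0xFF, 0x7F]  # ...0xff,0xff,0x7f
assert vbuffer_offset_bytes(7) == [0x07, 0x00, 0x00]        # ...0x07,0x00,0x00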
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[252:254], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b96 v[252:254], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_b96 v[6:8], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[252:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_b128 v[252:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_b128 v[6:9], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_b16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_b16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_d16_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_d16_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_i16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_i16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_i16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_i16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], s3
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], s3
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_load_u16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_load_u16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_load_u16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_load_u16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], s3
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b8 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b8 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b8 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b8 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], s4
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b16 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b16 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b16 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b16 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], s4
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b32 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b32 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b32 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b32 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], s4
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[254:255], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b64 v[254:255], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0xfe,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b64 v[2:3], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:7 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[252:254], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b96 v[252:254], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b96 v[2:4], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[252:255], off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_b128 v[252:255], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v255, off, s[12:15], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[16:19], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[96:99], s4 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s101 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], m0 offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 idxen offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 offen offset:8388607
+// GFX1250: buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:0
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:7
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS
+// GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+
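+// The atomic opcodes repeat the same matrix with the atomic temporal hints.
+// Note from the paired check lines that th:TH_ATOMIC_RT_RETURN assembles to the
+// same encoding as th:TH_ATOMIC_RETURN (byte 6 is 0x94 with scope:SCOPE_SE under
+// either spelling) and is printed back in the canonical TH_ATOMIC_RETURN form.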
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_add_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_add_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_and_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[252:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[252:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
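+// buffer_atomic_sub_clamp_u32 is exercised with a denser pattern than the groups
+// above: the TH_ATOMIC_RETURN / RT_RETURN / CASCADE_NT cache-control variants are
+// folded directly into each operand variant instead of forming a separate pass.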
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:0 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:0 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:0 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_dec_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_inc_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_i32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_i32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_or_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_swap_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v255, off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_xor_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[254:255], off, s[8:11], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[12:15], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[96:99], s3 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s101 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], m0 offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607
+// GFX1250: buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:0
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:7
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RT_RETURN scope:SCOPE_SE
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+
+buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV
+// GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+
buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 nv
// GFX1250: buffer_load_b32 v5, v1, s[8:11], s3 offen offset:4095 nv ; encoding: [0x83,0x00,0x05,0xc4,0x05,0x10,0x80,0x40,0x01,0xff,0x0f,0x00]
// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: nv is not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
index c5bd00c..e879432 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_err.s
@@ -5,6 +5,76 @@ v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
// GFX125X-ERR-NEXT:{{^}}v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+v_fma_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_fma_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_fixup_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_div_fixup_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_fmas_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_div_fmas_f64 v[4:5], v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_scale_f64 v[4:5], s2, v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_div_scale_f64 v[4:5], s2, v[2:3], v[6:7], v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_co_u64_u32 v[4:5], s2, v2, v6, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mad_co_u64_u32 v[4:5], s2, v2, v6, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_co_i64_i32 v[4:5], s2, v2, v6, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mad_co_i64_i32 v[4:5], s2, v2, v6, v[8:9] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_minimum_f64 v[4:5], v[2:3], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_minimum_f64 v[4:5], v[2:3], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_maximum_f64 v[4:5], v[2:3], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_maximum_f64 v[4:5], v[2:3], v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ldexp_f64 v[4:5], v[2:3], v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_ldexp_f64 v[4:5], v[2:3], v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_lo_u32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mul_lo_u32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_hi_u32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mul_hi_u32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_hi_i32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_mul_hi_i32 v4, v2, v6 dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_lshrrev_b64 v[4:5], v2, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_lshrrev_b64 v[4:5], v2, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashrrev_i64 v[4:5], v2, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_ashrrev_i64 v[4:5], v2, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
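+// Summary of the block above: these 64-bit and DP ALU VOP3 opcodes reject
+// the dpp8: modifier outright on GFX125X ("not a valid operand").
+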
v_mad_u32 v2, v4, v7, v8 dpp8:[7,6,5,4,3,2,1,0]
// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX125X-ERR-NEXT:{{^}}v_mad_u32 v2, v4, v7, v8 dpp8:[7,6,5,4,3,2,1,0]
@@ -42,9 +112,94 @@ v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] dpp8:[7,6,5,4,3,2,1,0]
v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] quad_perm:[3,2,1,0]
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
// GFX125X-ERR-NEXT:{{^}}v_lshl_add_u64 v[2:3], v[4:5], v7, v[8:9] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+v_fma_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_fma_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_fixup_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_div_fixup_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_fmas_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_div_fmas_f64 v[4:5], v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_div_scale_f64 v[4:5], s2, v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_div_scale_f64 v[4:5], s2, v[2:3], v[6:7], v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_co_u64_u32 v[4:5], s2, v2, v6, v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mad_co_u64_u32 v[4:5], s2, v2, v6, v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mad_co_i64_i32 v[4:5], s2, v2, v6, v[8:9] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mad_co_i64_i32 v[4:5], s2, v2, v6, v[8:9] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_minimum_f64 v[4:5], v[2:3], v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_minimum_f64 v[4:5], v[2:3], v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_maximum_f64 v[4:5], v[2:3], v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_maximum_f64 v[4:5], v[2:3], v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ldexp_f64 v[4:5], v[2:3], v6 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_ldexp_f64 v[4:5], v[2:3], v6 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_lo_u32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mul_lo_u32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_hi_u32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mul_hi_u32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_mul_hi_i32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_mul_hi_i32 v4, v2, v6 quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_lshrrev_b64 v[4:5], v2, v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_lshrrev_b64 v[4:5], v2, v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
+v_ashrrev_i64 v[4:5], v2, v[6:7] quad_perm:[3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
+// GFX125X-ERR-NEXT:{{^}}v_ashrrev_i64 v[4:5], v2, v[6:7] quad_perm:[3,2,1,0]
+// GFX125X-ERR-NEXT:{{^}} ^
+
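+// Summary of the block above: the same opcodes reject quad_perm as well;
+// GFX1250 reports an invalid operand, while GFX1251 notes that DP ALU dpp
+// only supports row_share.
+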
v_mad_u32 v2, v4, v7, v8 quad_perm:[3,2,1,0]
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX1251-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: DP ALU dpp only supports row_share
@@ -87,6 +242,11 @@ v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}}v_mad_nc_i64_i32 v[4:5], v2, v5, v[6:7] quad_perm:[3,2,1,0]
// GFX125X-ERR-NEXT:{{^}} ^
+v_trig_preop_f64 v[4:5], v[8:9], v2 row_share:1
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX125X-ERR-NEXT:{{^}}v_trig_preop_f64 v[4:5], v[8:9], v2 row_share:1
+// GFX125X-ERR-NEXT:{{^}} ^
+
v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
// GFX125X-ERR-NEXT:{{^}}v_ashr_pk_i8_i32 v1, v2, v3, v4 clamp
@@ -161,3 +321,8 @@ v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk8_f32_fp4 v[10:17], s20, v8
// GFX125X-ERR-NEXT:{{^}} ^
+
+v_cvt_scale_pk16_bf16_bf6 v[10:17], s[20:22], 0xcf00
+// GFX125X-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX125X-ERR-NEXT:{{^}}v_cvt_scale_pk16_bf16_bf6 v[10:17], s[20:22], 0xcf00
+// GFX125X-ERR-NEXT:{{^}} ^
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop2_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop2_err.s
new file mode 100644
index 0000000..157b4d6
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop2_err.s
@@ -0,0 +1,13 @@
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s
+
+v_fmaak_f32_e64_dpp v4, v2, v6, 3 row_share:1
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: e64_dpp variant of this instruction is not supported
+
+v_fmamk_f32_e64_dpp v4, v2, 3, v6 row_share:1
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: e64_dpp variant of this instruction is not supported
+
+v_fmaak_f16_e64_dpp v4, v2, v6, 3 row_share:1
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: e64_dpp variant of this instruction is not supported
+
+v_fmamk_f16_e64_dpp v4, v2, 3, v6 row_share:1
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: e64_dpp variant of this instruction is not supported
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3cx.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3cx.s
new file mode 100644
index 0000000..4aea7b3
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3cx.s
@@ -0,0 +1,3413 @@
+// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
+
+v_cmpx_class_f16_e64 v1, v2
+// GFX1250: v_cmpx_class_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x01,0x05,0x02,0x00]
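+// Note: the v_cmpx family writes its result to EXEC implicitly; the leading
+// 0x7e byte in each encoding appears to be the sdst field selecting EXEC
+// (annotation for the reader, not part of the autogenerated checks).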
+
+v_cmpx_class_f16_e64 v255, v2
+// GFX1250: v_cmpx_class_f16_e64 v255, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0xff,0x05,0x02,0x00]
+
+v_cmpx_class_f16_e64 s1, v2
+// GFX1250: v_cmpx_class_f16_e64 s1, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x01,0x04,0x02,0x00]
+
+v_cmpx_class_f16_e64 s105, v255
+// GFX1250: v_cmpx_class_f16_e64 s105, v255 ; encoding: [0x7e,0x00,0xfd,0xd4,0x69,0xfe,0x03,0x00]
+
+v_cmpx_class_f16_e64 vcc_lo, s2
+// GFX1250: v_cmpx_class_f16_e64 vcc_lo, s2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x6a,0x04,0x00,0x00]
+
+v_cmpx_class_f16_e64 vcc_hi, s105
+// GFX1250: v_cmpx_class_f16_e64 vcc_hi, s105 ; encoding: [0x7e,0x00,0xfd,0xd4,0x6b,0xd2,0x00,0x00]
+
+v_cmpx_class_f16_e64 ttmp15, ttmp15
+// GFX1250: v_cmpx_class_f16_e64 ttmp15, ttmp15 ; encoding: [0x7e,0x00,0xfd,0xd4,0x7b,0xf6,0x00,0x00]
+
+v_cmpx_class_f16_e64 m0, src_scc
+// GFX1250: v_cmpx_class_f16_e64 m0, src_scc ; encoding: [0x7e,0x00,0xfd,0xd4,0x7d,0xfa,0x01,0x00]
+
+v_cmpx_class_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_class_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xfd,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_class_f16_e64 exec_hi, null
+// GFX1250: v_cmpx_class_f16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xfd,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_class_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_class_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xfd,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_class_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_class_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xfd,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_class_f16_e64 0.5, m0
+// GFX1250: v_cmpx_class_f16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xfd,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_class_f16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_class_f16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xfd,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_class_f16_e64 -|0xfe0b|, vcc_hi
+// GFX1250: v_cmpx_class_f16_e64 -|0xfe0b|, vcc_hi ; encoding: [0x7e,0x01,0xfd,0xd4,0xff,0xd6,0x00,0x20,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_class_f32_e64 v1, v2
+// GFX1250: v_cmpx_class_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0xfe,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_class_f32_e64 v255, v255
+// GFX1250: v_cmpx_class_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0xfe,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_class_f32_e64 s1, s2
+// GFX1250: v_cmpx_class_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0xfe,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_class_f32_e64 s105, s105
+// GFX1250: v_cmpx_class_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0xfe,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_class_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_class_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xfe,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_class_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_class_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xfe,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_class_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_class_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xfe,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_class_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_class_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xfe,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_class_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_class_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xfe,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_class_f32_e64 exec_hi, null
+// GFX1250: v_cmpx_class_f32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xfe,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_class_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_class_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xfe,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_class_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_class_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xfe,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_class_f32_e64 0.5, m0
+// GFX1250: v_cmpx_class_f32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xfe,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_class_f32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_class_f32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xfe,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_class_f32_e64 -|0xaf123456|, vcc_hi
+// GFX1250: v_cmpx_class_f32_e64 -|0xaf123456|, vcc_hi ; encoding: [0x7e,0x01,0xfe,0xd4,0xff,0xd6,0x00,0x20,0x56,0x34,0x12,0xaf]
+
+v_cmpx_class_f64_e64 v[2:3], v2
+// GFX1250: v_cmpx_class_f64_e64 v[2:3], v2 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_class_f64_e64 v[2:3], v255
+// GFX1250: v_cmpx_class_f64_e64 v[2:3], v255 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xff,0x03,0x00]
+
+v_cmpx_class_f64_e64 v[2:3], s2
+// GFX1250: v_cmpx_class_f64_e64 v[2:3], s2 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0x05,0x00,0x00]
+
+v_cmpx_class_f64_e64 v[2:3], s105
+// GFX1250: v_cmpx_class_f64_e64 v[2:3], s105 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xd3,0x00,0x00]
+
+v_cmpx_class_f64_e64 v[254:255], ttmp15
+// GFX1250: v_cmpx_class_f64_e64 v[254:255], ttmp15 ; encoding: [0x7e,0x00,0xff,0xd4,0xfe,0xf7,0x00,0x00]
+
+v_cmpx_class_f64_e64 s[2:3], vcc_hi
+// GFX1250: v_cmpx_class_f64_e64 s[2:3], vcc_hi ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xd6,0x00,0x00]
+
+v_cmpx_class_f64_e64 s[104:105], vcc_lo
+// GFX1250: v_cmpx_class_f64_e64 s[104:105], vcc_lo ; encoding: [0x7e,0x00,0xff,0xd4,0x68,0xd4,0x00,0x00]
+
+v_cmpx_class_f64_e64 vcc, m0
+// GFX1250: v_cmpx_class_f64_e64 vcc, m0 ; encoding: [0x7e,0x00,0xff,0xd4,0x6a,0xfa,0x00,0x00]
+
+v_cmpx_class_f64_e64 ttmp[14:15], exec_hi
+// GFX1250: v_cmpx_class_f64_e64 ttmp[14:15], exec_hi ; encoding: [0x7e,0x00,0xff,0xd4,0x7a,0xfe,0x00,0x00]
+
+v_cmpx_class_f64_e64 exec, exec_lo
+// GFX1250: v_cmpx_class_f64_e64 exec, exec_lo ; encoding: [0x7e,0x00,0xff,0xd4,0x7e,0xfc,0x00,0x00]
+
+v_cmpx_class_f64_e64 null, null
+// GFX1250: v_cmpx_class_f64_e64 null, null ; encoding: [0x7e,0x00,0xff,0xd4,0x7c,0xf8,0x00,0x00]
+
+v_cmpx_class_f64_e64 -1, -1
+// GFX1250: v_cmpx_class_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xff,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_class_f64_e64 0.5, 0.5
+// GFX1250: v_cmpx_class_f64_e64 0.5, 0.5 ; encoding: [0x7e,0x00,0xff,0xd4,0xf0,0xe0,0x01,0x00]
+
+v_cmpx_class_f64_e64 -|src_scc|, src_scc
+// GFX1250: v_cmpx_class_f64_e64 -|src_scc|, src_scc ; encoding: [0x7e,0x01,0xff,0xd4,0xfd,0xfa,0x01,0x20]
+
+v_cmpx_class_f64_e64 0xaf123456, 0xaf123456
+// GFX1250: v_cmpx_class_f64_e64 0xaf123456, 0xaf123456 ; encoding: [0x7e,0x00,0xff,0xd4,0xff,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f16_e64 v1, v2
+// GFX1250: v_cmpx_eq_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x82,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_f16_e64 v255, v255
+// GFX1250: v_cmpx_eq_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x82,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_f16_e64 s1, s2
+// GFX1250: v_cmpx_eq_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x82,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_f16_e64 s105, s105
+// GFX1250: v_cmpx_eq_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x82,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x82,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_eq_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x82,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x82,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x82,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x82,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_eq_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x82,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x82,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x82,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_eq_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x82,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_eq_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_eq_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x82,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_eq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_eq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x82,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_f32_e64 v1, v2
+// GFX1250: v_cmpx_eq_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x92,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_f32_e64 v255, v255
+// GFX1250: v_cmpx_eq_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x92,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_f32_e64 s1, s2
+// GFX1250: v_cmpx_eq_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x92,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_f32_e64 s105, s105
+// GFX1250: v_cmpx_eq_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x92,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x92,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_eq_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x92,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x92,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x92,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x92,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_eq_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x92,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x92,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x92,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_eq_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x92,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_eq_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_eq_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x92,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_eq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_eq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x92,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_eq_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa2,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_eq_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_eq_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa2,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_eq_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_eq_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa2,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_eq_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_eq_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa2,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_eq_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_eq_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa2,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_eq_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_eq_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa2,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_eq_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa2,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_eq_f64_e64 null, 0.5
+// GFX1250: v_cmpx_eq_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa2,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_eq_f64_e64 -1, -1
+// GFX1250: v_cmpx_eq_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa2,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_eq_f64_e64 0.5, null
+// GFX1250: v_cmpx_eq_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa2,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_eq_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_eq_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa2,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_eq_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_eq_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa2,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i16_e64 v1, v2
+// GFX1250: v_cmpx_eq_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb2,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_i16_e64 v255, v255
+// GFX1250: v_cmpx_eq_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_i16_e64 s1, s2
+// GFX1250: v_cmpx_eq_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb2,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_i16_e64 s105, s105
+// GFX1250: v_cmpx_eq_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb2,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb2,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_eq_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb2,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb2,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb2,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb2,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_eq_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb2,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb2,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb2,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_i16_e64 0.5, m0
+// GFX1250: v_cmpx_eq_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb2,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_eq_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_eq_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb2,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_eq_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_eq_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_i32_e64 v1, v2
+// GFX1250: v_cmpx_eq_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc2,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_i32_e64 v255, v255
+// GFX1250: v_cmpx_eq_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc2,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_i32_e64 s1, s2
+// GFX1250: v_cmpx_eq_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc2,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_i32_e64 s105, s105
+// GFX1250: v_cmpx_eq_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc2,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc2,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_eq_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc2,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc2,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc2,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc2,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_eq_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc2,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc2,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc2,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_i32_e64 0.5, m0
+// GFX1250: v_cmpx_eq_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc2,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_eq_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_eq_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc2,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_eq_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_eq_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc2,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_eq_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd2,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_eq_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_eq_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd2,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_eq_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_eq_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd2,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_eq_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_eq_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd2,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_eq_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_eq_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd2,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_eq_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_eq_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd2,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
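+// Note: for 64-bit operands a 32-bit hex literal is promoted to a 64-bit
+// literal; the printed form wraps it in lit64() and the encoding carries
+// eight literal bytes, as the check line above shows.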
+
+v_cmpx_eq_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_eq_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd2,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_eq_i64_e64 null, 0.5
+// GFX1250: v_cmpx_eq_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd2,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_eq_i64_e64 -1, -1
+// GFX1250: v_cmpx_eq_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd2,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_eq_i64_e64 0.5, null
+// GFX1250: v_cmpx_eq_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd2,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_eq_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_eq_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd2,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_eq_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_eq_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd2,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_eq_u16_e64 v1, v2
+// GFX1250: v_cmpx_eq_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xba,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_u16_e64 v255, v255
+// GFX1250: v_cmpx_eq_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_u16_e64 s1, s2
+// GFX1250: v_cmpx_eq_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xba,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_u16_e64 s105, s105
+// GFX1250: v_cmpx_eq_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xba,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xba,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_eq_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xba,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xba,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xba,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xba,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_eq_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xba,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xba,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xba,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_u16_e64 0.5, m0
+// GFX1250: v_cmpx_eq_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xba,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_eq_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_eq_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xba,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_eq_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_eq_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_eq_u32_e64 v1, v2
+// GFX1250: v_cmpx_eq_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xca,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_eq_u32_e64 v255, v255
+// GFX1250: v_cmpx_eq_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xca,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_eq_u32_e64 s1, s2
+// GFX1250: v_cmpx_eq_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xca,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_eq_u32_e64 s105, s105
+// GFX1250: v_cmpx_eq_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xca,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_eq_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_eq_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xca,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_eq_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_eq_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xca,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_eq_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xca,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_eq_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_eq_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xca,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_eq_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_eq_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xca,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_eq_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_eq_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xca,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_eq_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_eq_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xca,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_eq_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_eq_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xca,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_eq_u32_e64 0.5, m0
+// GFX1250: v_cmpx_eq_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xca,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_eq_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_eq_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xca,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_eq_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_eq_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xca,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_eq_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_eq_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xda,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_eq_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_eq_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xda,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_eq_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_eq_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xda,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_eq_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_eq_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xda,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_eq_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_eq_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xda,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_eq_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_eq_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xda,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_eq_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_eq_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xda,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_eq_u64_e64 null, 0.5
+// GFX1250: v_cmpx_eq_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xda,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_eq_u64_e64 -1, -1
+// GFX1250: v_cmpx_eq_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xda,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_eq_u64_e64 0.5, null
+// GFX1250: v_cmpx_eq_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xda,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_eq_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_eq_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xda,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_eq_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_eq_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xda,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ge_f16_e64 v1, v2
+// GFX1250: v_cmpx_ge_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x86,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_f16_e64 v255, v255
+// GFX1250: v_cmpx_ge_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x86,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_f16_e64 s1, s2
+// GFX1250: v_cmpx_ge_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x86,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_f16_e64 s105, s105
+// GFX1250: v_cmpx_ge_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x86,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x86,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ge_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x86,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x86,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x86,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x86,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_ge_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x86,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x86,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x86,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_ge_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x86,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_ge_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_ge_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x86,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_ge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_ge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x86,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_f32_e64 v1, v2
+// GFX1250: v_cmpx_ge_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x96,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_f32_e64 v255, v255
+// GFX1250: v_cmpx_ge_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x96,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_f32_e64 s1, s2
+// GFX1250: v_cmpx_ge_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x96,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_f32_e64 s105, s105
+// GFX1250: v_cmpx_ge_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x96,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x96,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ge_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x96,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x96,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x96,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x96,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_ge_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x96,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x96,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x96,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_ge_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x96,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_ge_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_ge_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x96,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_ge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_ge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x96,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ge_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa6,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ge_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ge_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa6,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ge_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ge_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa6,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ge_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ge_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa6,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ge_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ge_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa6,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ge_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ge_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa6,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_ge_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa6,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_ge_f64_e64 null, 0.5
+// GFX1250: v_cmpx_ge_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa6,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ge_f64_e64 -1, -1
+// GFX1250: v_cmpx_ge_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa6,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ge_f64_e64 0.5, null
+// GFX1250: v_cmpx_ge_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa6,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ge_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_ge_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa6,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_ge_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_ge_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa6,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i16_e64 v1, v2
+// GFX1250: v_cmpx_ge_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb6,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_i16_e64 v255, v255
+// GFX1250: v_cmpx_ge_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_i16_e64 s1, s2
+// GFX1250: v_cmpx_ge_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb6,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_i16_e64 s105, s105
+// GFX1250: v_cmpx_ge_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb6,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb6,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ge_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb6,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb6,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb6,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb6,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_ge_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb6,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb6,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb6,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_i16_e64 0.5, m0
+// GFX1250: v_cmpx_ge_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb6,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ge_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ge_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb6,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ge_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_ge_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_i32_e64 v1, v2
+// GFX1250: v_cmpx_ge_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc6,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_i32_e64 v255, v255
+// GFX1250: v_cmpx_ge_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc6,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_i32_e64 s1, s2
+// GFX1250: v_cmpx_ge_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc6,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_i32_e64 s105, s105
+// GFX1250: v_cmpx_ge_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc6,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc6,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ge_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc6,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc6,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc6,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc6,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_ge_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc6,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc6,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc6,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_i32_e64 0.5, m0
+// GFX1250: v_cmpx_ge_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc6,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ge_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ge_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc6,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ge_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_ge_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc6,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ge_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd6,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ge_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ge_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd6,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ge_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ge_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd6,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ge_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ge_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd6,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ge_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ge_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd6,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ge_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ge_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd6,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ge_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_ge_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd6,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_ge_i64_e64 null, 0.5
+// GFX1250: v_cmpx_ge_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd6,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ge_i64_e64 -1, -1
+// GFX1250: v_cmpx_ge_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd6,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ge_i64_e64 0.5, null
+// GFX1250: v_cmpx_ge_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd6,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ge_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_ge_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd6,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_ge_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_ge_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd6,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ge_u16_e64 v1, v2
+// GFX1250: v_cmpx_ge_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbe,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_u16_e64 v255, v255
+// GFX1250: v_cmpx_ge_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_u16_e64 s1, s2
+// GFX1250: v_cmpx_ge_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbe,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_u16_e64 s105, s105
+// GFX1250: v_cmpx_ge_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbe,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbe,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ge_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbe,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbe,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xbe,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbe,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_ge_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbe,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbe,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbe,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_u16_e64 0.5, m0
+// GFX1250: v_cmpx_ge_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xbe,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ge_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ge_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbe,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ge_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_ge_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ge_u32_e64 v1, v2
+// GFX1250: v_cmpx_ge_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xce,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ge_u32_e64 v255, v255
+// GFX1250: v_cmpx_ge_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xce,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ge_u32_e64 s1, s2
+// GFX1250: v_cmpx_ge_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xce,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ge_u32_e64 s105, s105
+// GFX1250: v_cmpx_ge_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xce,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ge_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ge_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xce,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ge_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ge_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xce,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ge_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xce,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ge_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_ge_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xce,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ge_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ge_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xce,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ge_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_ge_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xce,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ge_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_ge_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xce,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ge_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ge_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xce,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ge_u32_e64 0.5, m0
+// GFX1250: v_cmpx_ge_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xce,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ge_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ge_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xce,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ge_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_ge_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xce,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ge_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ge_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xde,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ge_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ge_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xde,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ge_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ge_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xde,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ge_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ge_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xde,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ge_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ge_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xde,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ge_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ge_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xde,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ge_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_ge_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xde,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_ge_u64_e64 null, 0.5
+// GFX1250: v_cmpx_ge_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xde,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ge_u64_e64 -1, -1
+// GFX1250: v_cmpx_ge_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xde,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ge_u64_e64 0.5, null
+// GFX1250: v_cmpx_ge_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xde,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ge_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_ge_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xde,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_ge_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_ge_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xde,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_gt_f16_e64 v1, v2
+// GFX1250: v_cmpx_gt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x84,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_f16_e64 v255, v255
+// GFX1250: v_cmpx_gt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x84,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_f16_e64 s1, s2
+// GFX1250: v_cmpx_gt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x84,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_f16_e64 s105, s105
+// GFX1250: v_cmpx_gt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x84,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x84,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_gt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x84,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x84,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x84,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x84,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_gt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x84,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x84,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x84,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_gt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x84,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_gt_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_gt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x84,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_gt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_gt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x84,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_f32_e64 v1, v2
+// GFX1250: v_cmpx_gt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x94,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_f32_e64 v255, v255
+// GFX1250: v_cmpx_gt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x94,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_f32_e64 s1, s2
+// GFX1250: v_cmpx_gt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x94,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_f32_e64 s105, s105
+// GFX1250: v_cmpx_gt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x94,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x94,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_gt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x94,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x94,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x94,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x94,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_gt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x94,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x94,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x94,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_gt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x94,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_gt_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_gt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x94,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_gt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_gt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x94,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_gt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa4,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_gt_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_gt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa4,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_gt_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_gt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa4,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_gt_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_gt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa4,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_gt_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_gt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa4,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_gt_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_gt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa4,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_gt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa4,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_gt_f64_e64 null, 0.5
+// GFX1250: v_cmpx_gt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa4,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_gt_f64_e64 -1, -1
+// GFX1250: v_cmpx_gt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa4,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_gt_f64_e64 0.5, null
+// GFX1250: v_cmpx_gt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa4,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_gt_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_gt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa4,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_gt_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_gt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa4,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i16_e64 v1, v2
+// GFX1250: v_cmpx_gt_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb4,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_i16_e64 v255, v255
+// GFX1250: v_cmpx_gt_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_i16_e64 s1, s2
+// GFX1250: v_cmpx_gt_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb4,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_i16_e64 s105, s105
+// GFX1250: v_cmpx_gt_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb4,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb4,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_gt_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb4,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb4,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb4,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb4,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_gt_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb4,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb4,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb4,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_i16_e64 0.5, m0
+// GFX1250: v_cmpx_gt_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb4,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_gt_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_gt_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb4,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_gt_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_gt_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_i32_e64 v1, v2
+// GFX1250: v_cmpx_gt_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc4,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_i32_e64 v255, v255
+// GFX1250: v_cmpx_gt_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc4,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_i32_e64 s1, s2
+// GFX1250: v_cmpx_gt_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc4,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_i32_e64 s105, s105
+// GFX1250: v_cmpx_gt_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc4,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc4,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_gt_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc4,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc4,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc4,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc4,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_gt_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc4,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc4,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc4,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_i32_e64 0.5, m0
+// GFX1250: v_cmpx_gt_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc4,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_gt_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_gt_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc4,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_gt_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_gt_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc4,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_gt_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd4,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_gt_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_gt_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd4,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_gt_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_gt_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd4,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_gt_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_gt_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd4,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_gt_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_gt_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd4,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_gt_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_gt_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd4,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_gt_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_gt_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd4,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_gt_i64_e64 null, 0.5
+// GFX1250: v_cmpx_gt_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd4,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_gt_i64_e64 -1, -1
+// GFX1250: v_cmpx_gt_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd4,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_gt_i64_e64 0.5, null
+// GFX1250: v_cmpx_gt_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd4,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_gt_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_gt_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd4,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_gt_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_gt_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd4,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_gt_u16_e64 v1, v2
+// GFX1250: v_cmpx_gt_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbc,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_u16_e64 v255, v255
+// GFX1250: v_cmpx_gt_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_u16_e64 s1, s2
+// GFX1250: v_cmpx_gt_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbc,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_u16_e64 s105, s105
+// GFX1250: v_cmpx_gt_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbc,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbc,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_gt_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbc,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbc,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xbc,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbc,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_gt_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbc,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbc,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbc,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_u16_e64 0.5, m0
+// GFX1250: v_cmpx_gt_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xbc,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_gt_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_gt_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbc,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_gt_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_gt_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_gt_u32_e64 v1, v2
+// GFX1250: v_cmpx_gt_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcc,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_gt_u32_e64 v255, v255
+// GFX1250: v_cmpx_gt_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcc,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_gt_u32_e64 s1, s2
+// GFX1250: v_cmpx_gt_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcc,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_gt_u32_e64 s105, s105
+// GFX1250: v_cmpx_gt_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcc,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_gt_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_gt_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcc,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_gt_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_gt_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcc,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_gt_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcc,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_gt_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_gt_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcc,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_gt_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_gt_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcc,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_gt_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_gt_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcc,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_gt_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_gt_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcc,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_gt_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_gt_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcc,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_gt_u32_e64 0.5, m0
+// GFX1250: v_cmpx_gt_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcc,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_gt_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_gt_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcc,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_gt_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_gt_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcc,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_gt_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_gt_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdc,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_gt_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_gt_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdc,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_gt_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_gt_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdc,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_gt_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_gt_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdc,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_gt_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_gt_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdc,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_gt_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_gt_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdc,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_gt_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_gt_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdc,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_gt_u64_e64 null, 0.5
+// GFX1250: v_cmpx_gt_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdc,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_gt_u64_e64 -1, -1
+// GFX1250: v_cmpx_gt_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdc,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_gt_u64_e64 0.5, null
+// GFX1250: v_cmpx_gt_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdc,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_gt_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_gt_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdc,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_gt_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_gt_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdc,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_le_f16_e64 v1, v2
+// GFX1250: v_cmpx_le_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x83,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_f16_e64 v255, v255
+// GFX1250: v_cmpx_le_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x83,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_f16_e64 s1, s2
+// GFX1250: v_cmpx_le_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x83,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_f16_e64 s105, s105
+// GFX1250: v_cmpx_le_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x83,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x83,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_le_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x83,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x83,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_le_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x83,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x83,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_le_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x83,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_le_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x83,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x83,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_le_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x83,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_le_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_le_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x83,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_le_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_le_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x83,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_f32_e64 v1, v2
+// GFX1250: v_cmpx_le_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x93,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_f32_e64 v255, v255
+// GFX1250: v_cmpx_le_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x93,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_f32_e64 s1, s2
+// GFX1250: v_cmpx_le_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x93,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_f32_e64 s105, s105
+// GFX1250: v_cmpx_le_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x93,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x93,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_le_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x93,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x93,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_le_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x93,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x93,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_le_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x93,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_le_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x93,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x93,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_le_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x93,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_le_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_le_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x93,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_le_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_le_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x93,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_le_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa3,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_le_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_le_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa3,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_le_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_le_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa3,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_le_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_le_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa3,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_le_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_le_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa3,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_le_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_le_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa3,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_le_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa3,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_le_f64_e64 null, 0.5
+// GFX1250: v_cmpx_le_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa3,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_le_f64_e64 -1, -1
+// GFX1250: v_cmpx_le_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa3,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_le_f64_e64 0.5, null
+// GFX1250: v_cmpx_le_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa3,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_le_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_le_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa3,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_le_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_le_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa3,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i16_e64 v1, v2
+// GFX1250: v_cmpx_le_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb3,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_i16_e64 v255, v255
+// GFX1250: v_cmpx_le_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_i16_e64 s1, s2
+// GFX1250: v_cmpx_le_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb3,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_i16_e64 s105, s105
+// GFX1250: v_cmpx_le_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb3,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb3,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_le_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb3,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb3,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_le_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb3,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb3,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_le_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb3,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_le_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb3,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb3,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_i16_e64 0.5, m0
+// GFX1250: v_cmpx_le_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb3,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_le_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_le_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb3,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_le_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_le_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_i32_e64 v1, v2
+// GFX1250: v_cmpx_le_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc3,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_i32_e64 v255, v255
+// GFX1250: v_cmpx_le_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc3,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_i32_e64 s1, s2
+// GFX1250: v_cmpx_le_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc3,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_i32_e64 s105, s105
+// GFX1250: v_cmpx_le_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc3,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc3,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_le_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc3,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc3,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_le_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc3,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc3,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_le_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc3,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_le_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc3,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc3,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_i32_e64 0.5, m0
+// GFX1250: v_cmpx_le_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc3,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_le_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_le_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc3,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_le_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_le_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc3,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_le_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd3,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_le_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_le_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd3,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_le_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_le_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd3,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_le_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_le_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd3,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_le_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_le_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd3,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_le_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_le_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd3,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_le_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_le_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd3,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_le_i64_e64 null, 0.5
+// GFX1250: v_cmpx_le_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd3,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_le_i64_e64 -1, -1
+// GFX1250: v_cmpx_le_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd3,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_le_i64_e64 0.5, null
+// GFX1250: v_cmpx_le_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd3,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_le_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_le_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd3,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_le_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_le_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd3,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_le_u16_e64 v1, v2
+// GFX1250: v_cmpx_le_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbb,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_u16_e64 v255, v255
+// GFX1250: v_cmpx_le_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_u16_e64 s1, s2
+// GFX1250: v_cmpx_le_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbb,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_u16_e64 s105, s105
+// GFX1250: v_cmpx_le_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbb,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbb,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_le_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbb,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbb,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_le_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xbb,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbb,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_le_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbb,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_le_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbb,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbb,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_u16_e64 0.5, m0
+// GFX1250: v_cmpx_le_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xbb,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_le_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_le_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbb,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_le_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_le_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_le_u32_e64 v1, v2
+// GFX1250: v_cmpx_le_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcb,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_le_u32_e64 v255, v255
+// GFX1250: v_cmpx_le_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcb,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_le_u32_e64 s1, s2
+// GFX1250: v_cmpx_le_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcb,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_le_u32_e64 s105, s105
+// GFX1250: v_cmpx_le_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcb,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_le_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_le_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcb,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_le_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_le_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcb,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_le_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcb,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_le_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_le_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcb,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_le_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_le_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcb,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_le_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_le_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcb,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_le_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_le_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcb,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_le_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_le_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcb,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_le_u32_e64 0.5, m0
+// GFX1250: v_cmpx_le_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcb,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_le_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_le_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcb,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_le_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_le_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcb,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_le_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_le_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdb,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_le_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_le_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdb,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_le_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_le_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdb,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_le_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_le_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdb,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_le_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_le_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdb,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_le_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_le_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdb,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_le_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_le_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdb,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_le_u64_e64 null, 0.5
+// GFX1250: v_cmpx_le_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdb,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_le_u64_e64 -1, -1
+// GFX1250: v_cmpx_le_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdb,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_le_u64_e64 0.5, null
+// GFX1250: v_cmpx_le_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdb,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_le_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_le_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdb,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_le_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_le_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdb,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_lg_f16_e64 v1, v2
+// GFX1250: v_cmpx_lg_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x85,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lg_f16_e64 v255, v255
+// GFX1250: v_cmpx_lg_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x85,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lg_f16_e64 s1, s2
+// GFX1250: v_cmpx_lg_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x85,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lg_f16_e64 s105, s105
+// GFX1250: v_cmpx_lg_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x85,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lg_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lg_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x85,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lg_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_lg_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x85,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lg_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lg_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x85,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lg_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_lg_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x85,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lg_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_lg_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x85,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lg_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_lg_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x85,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lg_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_lg_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x85,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lg_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_lg_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x85,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lg_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_lg_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x85,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_lg_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_lg_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x85,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_lg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_lg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x85,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lg_f32_e64 v1, v2
+// GFX1250: v_cmpx_lg_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x95,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lg_f32_e64 v255, v255
+// GFX1250: v_cmpx_lg_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x95,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lg_f32_e64 s1, s2
+// GFX1250: v_cmpx_lg_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x95,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lg_f32_e64 s105, s105
+// GFX1250: v_cmpx_lg_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x95,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lg_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lg_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x95,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lg_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_lg_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x95,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lg_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x95,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lg_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_lg_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x95,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lg_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_lg_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x95,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lg_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_lg_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x95,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lg_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_lg_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x95,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lg_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_lg_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x95,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lg_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_lg_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x95,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_lg_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_lg_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x95,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_lg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_lg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x95,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_lg_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa5,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_lg_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_lg_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa5,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_lg_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_lg_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa5,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_lg_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_lg_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa5,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_lg_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_lg_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa5,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_lg_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_lg_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa5,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lg_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_lg_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa5,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_lg_f64_e64 null, 0.5
+// GFX1250: v_cmpx_lg_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa5,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_lg_f64_e64 -1, -1
+// GFX1250: v_cmpx_lg_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa5,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_lg_f64_e64 0.5, null
+// GFX1250: v_cmpx_lg_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa5,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_lg_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_lg_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa5,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_lg_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_lg_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa5,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f16_e64 v1, v2
+// GFX1250: v_cmpx_lt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x81,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_f16_e64 v255, v255
+// GFX1250: v_cmpx_lt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x81,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_f16_e64 s1, s2
+// GFX1250: v_cmpx_lt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x81,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_f16_e64 s105, s105
+// GFX1250: v_cmpx_lt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x81,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x81,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_lt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x81,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x81,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x81,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x81,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_lt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x81,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x81,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x81,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_lt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x81,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_lt_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_lt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x81,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_lt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_lt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x81,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_f32_e64 v1, v2
+// GFX1250: v_cmpx_lt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x91,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_f32_e64 v255, v255
+// GFX1250: v_cmpx_lt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x91,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_f32_e64 s1, s2
+// GFX1250: v_cmpx_lt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x91,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_f32_e64 s105, s105
+// GFX1250: v_cmpx_lt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x91,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x91,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_lt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x91,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x91,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x91,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x91,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_lt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x91,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x91,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x91,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_lt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x91,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_lt_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_lt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x91,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_lt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_lt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x91,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_lt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa1,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_lt_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_lt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa1,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_lt_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_lt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa1,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_lt_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_lt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa1,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_lt_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_lt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa1,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_lt_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_lt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa1,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_lt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa1,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_lt_f64_e64 null, 0.5
+// GFX1250: v_cmpx_lt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa1,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_lt_f64_e64 -1, -1
+// GFX1250: v_cmpx_lt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa1,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_lt_f64_e64 0.5, null
+// GFX1250: v_cmpx_lt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa1,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_lt_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_lt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa1,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_lt_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_lt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa1,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i16_e64 v1, v2
+// GFX1250: v_cmpx_lt_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb1,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_i16_e64 v255, v255
+// GFX1250: v_cmpx_lt_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_i16_e64 s1, s2
+// GFX1250: v_cmpx_lt_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb1,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_i16_e64 s105, s105
+// GFX1250: v_cmpx_lt_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb1,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb1,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_lt_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb1,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb1,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb1,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb1,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_lt_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb1,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb1,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb1,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_i16_e64 0.5, m0
+// GFX1250: v_cmpx_lt_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb1,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_lt_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_lt_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb1,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_lt_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_lt_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_i32_e64 v1, v2
+// GFX1250: v_cmpx_lt_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc1,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_i32_e64 v255, v255
+// GFX1250: v_cmpx_lt_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc1,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_i32_e64 s1, s2
+// GFX1250: v_cmpx_lt_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc1,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_i32_e64 s105, s105
+// GFX1250: v_cmpx_lt_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc1,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc1,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_lt_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc1,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc1,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc1,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc1,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_lt_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc1,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc1,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc1,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_i32_e64 0.5, m0
+// GFX1250: v_cmpx_lt_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc1,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_lt_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_lt_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc1,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_lt_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_lt_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc1,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_lt_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd1,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_lt_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_lt_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd1,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_lt_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_lt_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd1,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_lt_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_lt_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd1,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_lt_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_lt_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd1,0xd4,0x6a,0xf4,0x00,0x00]
+
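+// For the 64-bit integer compares, the disassembler renders the literal as
+// lit64() and uses the 8-byte literal slot (source encoding 0xfe), presumably
+// because a 32-bit literal would be sign-extended to a different 64-bit
+// value; the f64 compares above keep the plain 4-byte literal form.
+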
+v_cmpx_lt_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_lt_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd1,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_lt_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_lt_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd1,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_lt_i64_e64 null, 0.5
+// GFX1250: v_cmpx_lt_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd1,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_lt_i64_e64 -1, -1
+// GFX1250: v_cmpx_lt_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd1,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_lt_i64_e64 0.5, null
+// GFX1250: v_cmpx_lt_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd1,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_lt_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_lt_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd1,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_lt_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_lt_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd1,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_lt_u16_e64 v1, v2
+// GFX1250: v_cmpx_lt_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb9,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_u16_e64 v255, v255
+// GFX1250: v_cmpx_lt_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_u16_e64 s1, s2
+// GFX1250: v_cmpx_lt_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb9,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_u16_e64 s105, s105
+// GFX1250: v_cmpx_lt_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb9,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb9,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_lt_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb9,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb9,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb9,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb9,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_lt_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb9,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb9,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb9,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_u16_e64 0.5, m0
+// GFX1250: v_cmpx_lt_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb9,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_lt_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_lt_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb9,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_lt_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_lt_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_lt_u32_e64 v1, v2
+// GFX1250: v_cmpx_lt_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc9,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_lt_u32_e64 v255, v255
+// GFX1250: v_cmpx_lt_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc9,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_lt_u32_e64 s1, s2
+// GFX1250: v_cmpx_lt_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc9,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_lt_u32_e64 s105, s105
+// GFX1250: v_cmpx_lt_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc9,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_lt_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_lt_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc9,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_lt_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_lt_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc9,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_lt_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc9,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_lt_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_lt_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc9,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_lt_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_lt_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc9,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_lt_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_lt_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc9,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_lt_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_lt_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc9,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_lt_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_lt_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc9,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_lt_u32_e64 0.5, m0
+// GFX1250: v_cmpx_lt_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc9,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_lt_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_lt_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc9,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_lt_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_lt_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc9,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_lt_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_lt_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd9,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_lt_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_lt_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd9,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_lt_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_lt_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd9,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_lt_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_lt_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd9,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_lt_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_lt_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd9,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_lt_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_lt_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd9,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_lt_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_lt_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd9,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_lt_u64_e64 null, 0.5
+// GFX1250: v_cmpx_lt_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd9,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_lt_u64_e64 -1, -1
+// GFX1250: v_cmpx_lt_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd9,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_lt_u64_e64 0.5, null
+// GFX1250: v_cmpx_lt_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xd9,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_lt_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_lt_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd9,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_lt_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_lt_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd9,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ne_i16_e64 v1, v2
+// GFX1250: v_cmpx_ne_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb5,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ne_i16_e64 v255, v255
+// GFX1250: v_cmpx_ne_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ne_i16_e64 s1, s2
+// GFX1250: v_cmpx_ne_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb5,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ne_i16_e64 s105, s105
+// GFX1250: v_cmpx_ne_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb5,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ne_i16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ne_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb5,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ne_i16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ne_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb5,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_i16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ne_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb5,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ne_i16_e64 m0, 0.5
+// GFX1250: v_cmpx_ne_i16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xb5,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ne_i16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ne_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb5,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ne_i16_e64 exec_hi, null
+// GFX1250: v_cmpx_ne_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb5,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ne_i16_e64 null, exec_lo
+// GFX1250: v_cmpx_ne_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb5,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ne_i16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ne_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb5,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ne_i16_e64 0.5, m0
+// GFX1250: v_cmpx_ne_i16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xb5,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ne_i16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ne_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb5,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ne_i16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_ne_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_i32_e64 v1, v2
+// GFX1250: v_cmpx_ne_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc5,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ne_i32_e64 v255, v255
+// GFX1250: v_cmpx_ne_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc5,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ne_i32_e64 s1, s2
+// GFX1250: v_cmpx_ne_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc5,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ne_i32_e64 s105, s105
+// GFX1250: v_cmpx_ne_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc5,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ne_i32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ne_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc5,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ne_i32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ne_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc5,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_i32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ne_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc5,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ne_i32_e64 m0, 0.5
+// GFX1250: v_cmpx_ne_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc5,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ne_i32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ne_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc5,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ne_i32_e64 exec_hi, null
+// GFX1250: v_cmpx_ne_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc5,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ne_i32_e64 null, exec_lo
+// GFX1250: v_cmpx_ne_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc5,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ne_i32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ne_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc5,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ne_i32_e64 0.5, m0
+// GFX1250: v_cmpx_ne_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc5,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ne_i32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ne_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc5,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ne_i32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_ne_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc5,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_i64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ne_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd5,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ne_i64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ne_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd5,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ne_i64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ne_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd5,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ne_i64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ne_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd5,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ne_i64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ne_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd5,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ne_i64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ne_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd5,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ne_i64_e64 exec, src_scc
+// GFX1250: v_cmpx_ne_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd5,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_ne_i64_e64 null, 0.5
+// GFX1250: v_cmpx_ne_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd5,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ne_i64_e64 -1, -1
+// GFX1250: v_cmpx_ne_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd5,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ne_i64_e64 0.5, null
+// GFX1250: v_cmpx_ne_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd5,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ne_i64_e64 src_scc, exec
+// GFX1250: v_cmpx_ne_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd5,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_ne_i64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_ne_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd5,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ne_u16_e64 v1, v2
+// GFX1250: v_cmpx_ne_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbd,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ne_u16_e64 v255, v255
+// GFX1250: v_cmpx_ne_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ne_u16_e64 s1, s2
+// GFX1250: v_cmpx_ne_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbd,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ne_u16_e64 s105, s105
+// GFX1250: v_cmpx_ne_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbd,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ne_u16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ne_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbd,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ne_u16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ne_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbd,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_u16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ne_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbd,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ne_u16_e64 m0, 0.5
+// GFX1250: v_cmpx_ne_u16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xbd,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ne_u16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ne_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbd,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ne_u16_e64 exec_hi, null
+// GFX1250: v_cmpx_ne_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbd,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ne_u16_e64 null, exec_lo
+// GFX1250: v_cmpx_ne_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbd,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ne_u16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ne_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbd,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ne_u16_e64 0.5, m0
+// GFX1250: v_cmpx_ne_u16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xbd,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ne_u16_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ne_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbd,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ne_u16_e64 0xfe0b, vcc_hi
+// GFX1250: v_cmpx_ne_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ne_u32_e64 v1, v2
+// GFX1250: v_cmpx_ne_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcd,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ne_u32_e64 v255, v255
+// GFX1250: v_cmpx_ne_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcd,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ne_u32_e64 s1, s2
+// GFX1250: v_cmpx_ne_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcd,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ne_u32_e64 s105, s105
+// GFX1250: v_cmpx_ne_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcd,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ne_u32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ne_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcd,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ne_u32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ne_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcd,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_u32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ne_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcd,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ne_u32_e64 m0, 0.5
+// GFX1250: v_cmpx_ne_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcd,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ne_u32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ne_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcd,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ne_u32_e64 exec_hi, null
+// GFX1250: v_cmpx_ne_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcd,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ne_u32_e64 null, exec_lo
+// GFX1250: v_cmpx_ne_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcd,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ne_u32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ne_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcd,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ne_u32_e64 0.5, m0
+// GFX1250: v_cmpx_ne_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcd,0xd4,0xf0,0xfa,0x00,0x00]
+
+v_cmpx_ne_u32_e64 src_scc, vcc_lo
+// GFX1250: v_cmpx_ne_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcd,0xd4,0xfd,0xd4,0x00,0x00]
+
+v_cmpx_ne_u32_e64 0xaf123456, vcc_hi
+// GFX1250: v_cmpx_ne_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcd,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ne_u64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ne_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdd,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ne_u64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ne_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdd,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ne_u64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ne_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdd,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ne_u64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ne_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdd,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ne_u64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ne_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdd,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ne_u64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ne_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdd,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_ne_u64_e64 exec, src_scc
+// GFX1250: v_cmpx_ne_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdd,0xd4,0x7e,0xfa,0x01,0x00]
+
+v_cmpx_ne_u64_e64 null, 0.5
+// GFX1250: v_cmpx_ne_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdd,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ne_u64_e64 -1, -1
+// GFX1250: v_cmpx_ne_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdd,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ne_u64_e64 0.5, null
+// GFX1250: v_cmpx_ne_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdd,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ne_u64_e64 src_scc, exec
+// GFX1250: v_cmpx_ne_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdd,0xd4,0xfd,0xfc,0x00,0x00]
+
+v_cmpx_ne_u64_e64 0xaf123456, vcc
+// GFX1250: v_cmpx_ne_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdd,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+v_cmpx_neq_f16_e64 v1, v2
+// GFX1250: v_cmpx_neq_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8d,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_neq_f16_e64 v255, v255
+// GFX1250: v_cmpx_neq_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8d,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_neq_f16_e64 s1, s2
+// GFX1250: v_cmpx_neq_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8d,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_neq_f16_e64 s105, s105
+// GFX1250: v_cmpx_neq_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8d,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_neq_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_neq_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8d,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_neq_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_neq_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8d,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_neq_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_neq_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8d,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_neq_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_neq_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8d,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_neq_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_neq_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8d,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_neq_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_neq_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8d,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_neq_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_neq_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8d,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_neq_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_neq_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8d,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_neq_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_neq_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8d,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_neq_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_neq_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8d,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_neq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_neq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8d,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_neq_f32_e64 v1, v2
+// GFX1250: v_cmpx_neq_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9d,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_neq_f32_e64 v255, v255
+// GFX1250: v_cmpx_neq_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9d,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_neq_f32_e64 s1, s2
+// GFX1250: v_cmpx_neq_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9d,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_neq_f32_e64 s105, s105
+// GFX1250: v_cmpx_neq_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9d,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_neq_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_neq_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9d,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_neq_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_neq_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9d,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_neq_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9d,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_neq_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_neq_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9d,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_neq_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_neq_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9d,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_neq_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_neq_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9d,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_neq_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_neq_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9d,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_neq_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_neq_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9d,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_neq_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_neq_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9d,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_neq_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_neq_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9d,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_neq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_neq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9d,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_neq_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xad,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_neq_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_neq_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xad,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_neq_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_neq_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xad,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_neq_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_neq_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xad,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_neq_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_neq_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xad,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_neq_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_neq_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xad,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_neq_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_neq_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xad,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_neq_f64_e64 null, 0.5
+// GFX1250: v_cmpx_neq_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xad,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_neq_f64_e64 -1, -1
+// GFX1250: v_cmpx_neq_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xad,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_neq_f64_e64 0.5, null
+// GFX1250: v_cmpx_neq_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xad,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_neq_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_neq_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xad,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_neq_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_neq_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xad,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f16_e64 v1, v2
+// GFX1250: v_cmpx_nge_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x89,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nge_f16_e64 v255, v255
+// GFX1250: v_cmpx_nge_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x89,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nge_f16_e64 s1, s2
+// GFX1250: v_cmpx_nge_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x89,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nge_f16_e64 s105, s105
+// GFX1250: v_cmpx_nge_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x89,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nge_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nge_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x89,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nge_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_nge_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x89,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nge_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nge_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x89,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nge_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_nge_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x89,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nge_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_nge_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x89,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nge_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nge_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x89,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nge_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_nge_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x89,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nge_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_nge_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x89,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nge_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_nge_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x89,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nge_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nge_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x89,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x89,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nge_f32_e64 v1, v2
+// GFX1250: v_cmpx_nge_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x99,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nge_f32_e64 v255, v255
+// GFX1250: v_cmpx_nge_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x99,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nge_f32_e64 s1, s2
+// GFX1250: v_cmpx_nge_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x99,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nge_f32_e64 s105, s105
+// GFX1250: v_cmpx_nge_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x99,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nge_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nge_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x99,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nge_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_nge_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x99,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nge_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x99,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nge_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_nge_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x99,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nge_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_nge_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x99,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nge_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nge_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x99,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nge_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_nge_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x99,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nge_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_nge_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x99,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nge_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_nge_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x99,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nge_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nge_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x99,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x99,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_nge_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa9,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_nge_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_nge_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa9,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_nge_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_nge_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa9,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_nge_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_nge_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa9,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_nge_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_nge_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa9,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_nge_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_nge_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa9,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nge_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_nge_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa9,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_nge_f64_e64 null, 0.5
+// GFX1250: v_cmpx_nge_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa9,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_nge_f64_e64 -1, -1
+// GFX1250: v_cmpx_nge_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa9,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_nge_f64_e64 0.5, null
+// GFX1250: v_cmpx_nge_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa9,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_nge_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_nge_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa9,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_nge_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_nge_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa9,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f16_e64 v1, v2
+// GFX1250: v_cmpx_ngt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8b,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ngt_f16_e64 v255, v255
+// GFX1250: v_cmpx_ngt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8b,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ngt_f16_e64 s1, s2
+// GFX1250: v_cmpx_ngt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8b,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 s105, s105
+// GFX1250: v_cmpx_ngt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8b,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ngt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8b,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_ngt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8b,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ngt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8b,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_ngt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8b,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_ngt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8b,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ngt_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_ngt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8b,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_ngt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8b,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_ngt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8b,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_ngt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8b,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_ngt_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_ngt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8b,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_ngt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_ngt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8b,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 v1, v2
+// GFX1250: v_cmpx_ngt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9b,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_ngt_f32_e64 v255, v255
+// GFX1250: v_cmpx_ngt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9b,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_ngt_f32_e64 s1, s2
+// GFX1250: v_cmpx_ngt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9b,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 s105, s105
+// GFX1250: v_cmpx_ngt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9b,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_ngt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9b,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_ngt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9b,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_ngt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9b,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_ngt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9b,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_ngt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9b,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_ngt_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_ngt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9b,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_ngt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9b,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_ngt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9b,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_ngt_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_ngt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9b,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_ngt_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_ngt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9b,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_ngt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_ngt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9b,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_ngt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xab,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_ngt_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_ngt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xab,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_ngt_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_ngt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xab,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_ngt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xab,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_ngt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xab,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_ngt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xab,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_ngt_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_ngt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xab,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_ngt_f64_e64 null, 0.5
+// GFX1250: v_cmpx_ngt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xab,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_ngt_f64_e64 -1, -1
+// GFX1250: v_cmpx_ngt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xab,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_ngt_f64_e64 0.5, null
+// GFX1250: v_cmpx_ngt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xab,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_ngt_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_ngt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xab,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_ngt_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_ngt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xab,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f16_e64 v1, v2
+// GFX1250: v_cmpx_nle_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8c,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nle_f16_e64 v255, v255
+// GFX1250: v_cmpx_nle_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8c,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nle_f16_e64 s1, s2
+// GFX1250: v_cmpx_nle_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8c,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nle_f16_e64 s105, s105
+// GFX1250: v_cmpx_nle_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8c,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nle_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nle_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8c,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nle_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_nle_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8c,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nle_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nle_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8c,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nle_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_nle_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8c,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nle_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_nle_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8c,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nle_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nle_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8c,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nle_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_nle_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8c,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nle_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_nle_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8c,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nle_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_nle_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8c,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nle_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nle_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8c,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nle_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nle_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8c,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nle_f32_e64 v1, v2
+// GFX1250: v_cmpx_nle_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9c,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nle_f32_e64 v255, v255
+// GFX1250: v_cmpx_nle_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9c,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nle_f32_e64 s1, s2
+// GFX1250: v_cmpx_nle_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9c,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nle_f32_e64 s105, s105
+// GFX1250: v_cmpx_nle_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9c,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nle_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nle_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9c,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nle_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_nle_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9c,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nle_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9c,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nle_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_nle_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9c,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nle_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_nle_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9c,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nle_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nle_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9c,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nle_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_nle_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9c,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nle_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_nle_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9c,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nle_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_nle_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9c,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nle_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nle_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9c,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nle_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nle_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9c,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_nle_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xac,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_nle_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_nle_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xac,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_nle_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_nle_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xac,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_nle_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_nle_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xac,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_nle_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_nle_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xac,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_nle_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_nle_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xac,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nle_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_nle_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xac,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_nle_f64_e64 null, 0.5
+// GFX1250: v_cmpx_nle_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xac,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_nle_f64_e64 -1, -1
+// GFX1250: v_cmpx_nle_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xac,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_nle_f64_e64 0.5, null
+// GFX1250: v_cmpx_nle_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xac,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_nle_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_nle_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xac,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_nle_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_nle_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xac,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f16_e64 v1, v2
+// GFX1250: v_cmpx_nlg_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8a,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nlg_f16_e64 v255, v255
+// GFX1250: v_cmpx_nlg_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8a,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nlg_f16_e64 s1, s2
+// GFX1250: v_cmpx_nlg_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8a,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 s105, s105
+// GFX1250: v_cmpx_nlg_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8a,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nlg_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8a,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_nlg_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8a,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nlg_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8a,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_nlg_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8a,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_nlg_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8a,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nlg_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nlg_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8a,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_nlg_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8a,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_nlg_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8a,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_nlg_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8a,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nlg_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nlg_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8a,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nlg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nlg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8a,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 v1, v2
+// GFX1250: v_cmpx_nlg_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9a,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nlg_f32_e64 v255, v255
+// GFX1250: v_cmpx_nlg_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9a,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nlg_f32_e64 s1, s2
+// GFX1250: v_cmpx_nlg_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9a,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 s105, s105
+// GFX1250: v_cmpx_nlg_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9a,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nlg_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9a,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_nlg_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9a,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nlg_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9a,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_nlg_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9a,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_nlg_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9a,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nlg_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nlg_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9a,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_nlg_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9a,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_nlg_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9a,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nlg_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_nlg_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9a,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nlg_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nlg_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9a,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nlg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nlg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9a,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_nlg_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xaa,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_nlg_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_nlg_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xaa,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_nlg_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_nlg_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xaa,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_nlg_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xaa,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_nlg_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xaa,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_nlg_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xaa,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlg_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_nlg_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xaa,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_nlg_f64_e64 null, 0.5
+// GFX1250: v_cmpx_nlg_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xaa,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_nlg_f64_e64 -1, -1
+// GFX1250: v_cmpx_nlg_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xaa,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_nlg_f64_e64 0.5, null
+// GFX1250: v_cmpx_nlg_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xaa,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_nlg_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_nlg_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xaa,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_nlg_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_nlg_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xaa,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f16_e64 v1, v2
+// GFX1250: v_cmpx_nlt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8e,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nlt_f16_e64 v255, v255
+// GFX1250: v_cmpx_nlt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8e,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nlt_f16_e64 s1, s2
+// GFX1250: v_cmpx_nlt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8e,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 s105, s105
+// GFX1250: v_cmpx_nlt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8e,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nlt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8e,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_nlt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8e,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nlt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8e,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_nlt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8e,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_nlt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8e,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nlt_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nlt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8e,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_nlt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8e,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_nlt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8e,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_nlt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8e,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nlt_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nlt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8e,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nlt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nlt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8e,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 v1, v2
+// GFX1250: v_cmpx_nlt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9e,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_nlt_f32_e64 v255, v255
+// GFX1250: v_cmpx_nlt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9e,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_nlt_f32_e64 s1, s2
+// GFX1250: v_cmpx_nlt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9e,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 s105, s105
+// GFX1250: v_cmpx_nlt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9e,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_nlt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9e,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_nlt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9e,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_nlt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9e,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_nlt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9e,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_nlt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9e,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_nlt_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_nlt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9e,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_nlt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9e,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_nlt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9e,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_nlt_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_nlt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9e,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_nlt_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_nlt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9e,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_nlt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_nlt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9e,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_nlt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xae,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_nlt_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_nlt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xae,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_nlt_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_nlt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xae,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_nlt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xae,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_nlt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xae,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_nlt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xae,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_nlt_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_nlt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xae,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_nlt_f64_e64 null, 0.5
+// GFX1250: v_cmpx_nlt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xae,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_nlt_f64_e64 -1, -1
+// GFX1250: v_cmpx_nlt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xae,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_nlt_f64_e64 0.5, null
+// GFX1250: v_cmpx_nlt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xae,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_nlt_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_nlt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xae,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_nlt_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_nlt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xae,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f16_e64 v1, v2
+// GFX1250: v_cmpx_o_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x87,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_o_f16_e64 v255, v255
+// GFX1250: v_cmpx_o_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x87,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_o_f16_e64 s1, s2
+// GFX1250: v_cmpx_o_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x87,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_o_f16_e64 s105, s105
+// GFX1250: v_cmpx_o_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x87,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_o_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_o_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x87,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_o_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_o_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x87,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_o_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_o_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x87,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_o_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_o_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x87,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_o_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_o_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x87,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_o_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_o_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x87,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_o_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_o_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x87,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_o_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_o_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x87,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_o_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_o_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x87,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_o_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_o_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x87,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_o_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_o_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x87,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_o_f32_e64 v1, v2
+// GFX1250: v_cmpx_o_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x97,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_o_f32_e64 v255, v255
+// GFX1250: v_cmpx_o_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x97,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_o_f32_e64 s1, s2
+// GFX1250: v_cmpx_o_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x97,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_o_f32_e64 s105, s105
+// GFX1250: v_cmpx_o_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x97,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_o_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_o_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x97,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_o_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_o_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x97,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_o_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x97,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_o_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_o_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x97,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_o_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_o_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x97,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_o_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_o_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x97,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_o_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_o_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x97,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_o_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_o_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x97,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_o_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_o_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x97,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_o_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_o_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x97,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_o_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_o_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x97,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_o_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa7,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_o_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_o_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa7,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_o_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_o_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa7,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_o_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_o_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa7,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_o_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_o_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa7,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_o_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_o_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa7,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_o_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_o_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa7,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_o_f64_e64 null, 0.5
+// GFX1250: v_cmpx_o_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa7,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_o_f64_e64 -1, -1
+// GFX1250: v_cmpx_o_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa7,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_o_f64_e64 0.5, null
+// GFX1250: v_cmpx_o_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa7,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_o_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_o_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa7,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_o_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_o_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa7,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f16_e64 v1, v2
+// GFX1250: v_cmpx_u_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x88,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_u_f16_e64 v255, v255
+// GFX1250: v_cmpx_u_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x88,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_u_f16_e64 s1, s2
+// GFX1250: v_cmpx_u_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x88,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_u_f16_e64 s105, s105
+// GFX1250: v_cmpx_u_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x88,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_u_f16_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_u_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x88,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_u_f16_e64 vcc_hi, 0xfe0b
+// GFX1250: v_cmpx_u_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x88,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_u_f16_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_u_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x88,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_u_f16_e64 m0, 0.5
+// GFX1250: v_cmpx_u_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x88,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_u_f16_e64 exec_lo, -1
+// GFX1250: v_cmpx_u_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x88,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_u_f16_e64 |exec_hi|, null
+// GFX1250: v_cmpx_u_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x88,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_u_f16_e64 null, exec_lo
+// GFX1250: v_cmpx_u_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x88,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_u_f16_e64 -1, exec_hi
+// GFX1250: v_cmpx_u_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x88,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_u_f16_e64 0.5, -m0
+// GFX1250: v_cmpx_u_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x88,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_u_f16_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_u_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x88,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_u_f16_e64 -|0xfe0b|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_u_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x88,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+v_cmpx_u_f32_e64 v1, v2
+// GFX1250: v_cmpx_u_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x98,0xd4,0x01,0x05,0x02,0x00]
+
+v_cmpx_u_f32_e64 v255, v255
+// GFX1250: v_cmpx_u_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x98,0xd4,0xff,0xff,0x03,0x00]
+
+v_cmpx_u_f32_e64 s1, s2
+// GFX1250: v_cmpx_u_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x98,0xd4,0x01,0x04,0x00,0x00]
+
+v_cmpx_u_f32_e64 s105, s105
+// GFX1250: v_cmpx_u_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x98,0xd4,0x69,0xd2,0x00,0x00]
+
+v_cmpx_u_f32_e64 vcc_lo, ttmp15
+// GFX1250: v_cmpx_u_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x98,0xd4,0x6a,0xf6,0x00,0x00]
+
+v_cmpx_u_f32_e64 vcc_hi, 0xaf123456
+// GFX1250: v_cmpx_u_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x98,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f32_e64 ttmp15, src_scc
+// GFX1250: v_cmpx_u_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x98,0xd4,0x7b,0xfa,0x01,0x00]
+
+v_cmpx_u_f32_e64 m0, 0.5
+// GFX1250: v_cmpx_u_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x98,0xd4,0x7d,0xe0,0x01,0x00]
+
+v_cmpx_u_f32_e64 exec_lo, -1
+// GFX1250: v_cmpx_u_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x98,0xd4,0x7e,0x82,0x01,0x00]
+
+v_cmpx_u_f32_e64 |exec_hi|, null
+// GFX1250: v_cmpx_u_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x98,0xd4,0x7f,0xf8,0x00,0x00]
+
+v_cmpx_u_f32_e64 null, exec_lo
+// GFX1250: v_cmpx_u_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x98,0xd4,0x7c,0xfc,0x00,0x00]
+
+v_cmpx_u_f32_e64 -1, exec_hi
+// GFX1250: v_cmpx_u_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x98,0xd4,0xc1,0xfe,0x00,0x00]
+
+v_cmpx_u_f32_e64 0.5, -m0
+// GFX1250: v_cmpx_u_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x98,0xd4,0xf0,0xfa,0x00,0x40]
+
+v_cmpx_u_f32_e64 -src_scc, |vcc_lo|
+// GFX1250: v_cmpx_u_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x98,0xd4,0xfd,0xd4,0x00,0x20]
+
+v_cmpx_u_f32_e64 -|0xaf123456|, -|vcc_hi| clamp
+// GFX1250: v_cmpx_u_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x98,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f64_e64 v[2:3], v[2:3]
+// GFX1250: v_cmpx_u_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa8,0xd4,0x02,0x05,0x02,0x00]
+
+v_cmpx_u_f64_e64 v[254:255], v[254:255]
+// GFX1250: v_cmpx_u_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa8,0xd4,0xfe,0xfd,0x03,0x00]
+
+v_cmpx_u_f64_e64 s[2:3], s[4:5]
+// GFX1250: v_cmpx_u_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa8,0xd4,0x02,0x08,0x00,0x00]
+
+v_cmpx_u_f64_e64 s[104:105], s[104:105]
+// GFX1250: v_cmpx_u_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa8,0xd4,0x68,0xd0,0x00,0x00]
+
+v_cmpx_u_f64_e64 vcc, ttmp[14:15]
+// GFX1250: v_cmpx_u_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa8,0xd4,0x6a,0xf4,0x00,0x00]
+
+v_cmpx_u_f64_e64 ttmp[14:15], 0xaf123456
+// GFX1250: v_cmpx_u_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa8,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+v_cmpx_u_f64_e64 -|exec|, src_scc
+// GFX1250: v_cmpx_u_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa8,0xd4,0x7e,0xfa,0x01,0x20]
+
+v_cmpx_u_f64_e64 null, 0.5
+// GFX1250: v_cmpx_u_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa8,0xd4,0x7c,0xe0,0x01,0x00]
+
+v_cmpx_u_f64_e64 -1, -1
+// GFX1250: v_cmpx_u_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa8,0xd4,0xc1,0x82,0x01,0x00]
+
+v_cmpx_u_f64_e64 0.5, null
+// GFX1250: v_cmpx_u_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa8,0xd4,0xf0,0xf8,0x00,0x00]
+
+v_cmpx_u_f64_e64 -|src_scc|, -|exec|
+// GFX1250: v_cmpx_u_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa8,0xd4,0xfd,0xfc,0x00,0x60]
+
+v_cmpx_u_f64_e64 0xaf123456, -|vcc| clamp
+// GFX1250: v_cmpx_u_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa8,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp16.s
new file mode 100644
index 0000000..2875d3e
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp16.s
@@ -0,0 +1,14 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+
+v_fma_mix_f32_bf16 v0, v1, v2, v3 op_sel:[0,0,0] row_ror:7 bank_mask:0x1 bound_ctrl:0
+// GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 row_ror:7 row_mask:0xf bank_mask:0x1 ; encoding: [0x00,0x00,0x3d,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x01,0xf1]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixlo_bf16 v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0
+// GFX1250: v_fma_mixlo_bf16_e64_dpp v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0 bank_mask:0xf ; encoding: [0x00,0xc0,0x3e,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixhi_bf16 v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0
+// GFX1250: v_fma_mixhi_bf16_e64_dpp v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0 bank_mask:0xf ; encoding: [0x00,0xc0,0x3f,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp8.s
new file mode 100644
index 0000000..13b8e21
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_dpp8.s
@@ -0,0 +1,26 @@
+// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding < %s | FileCheck --check-prefix=GFX1250 %s
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1200 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+
+v_fma_mix_f32_bf16 v0, v1, v2, v3 dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x00,0x3d,0xcc,0xe9,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mix_f32_bf16 v0, v1, v2, v3 clamp dpp8:[2,2,2,2,4,4,4,4] fi:1
+// GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 clamp dpp8:[2,2,2,2,4,4,4,4] fi:1 ; encoding: [0x00,0x80,0x3d,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixlo_bf16 v0, abs(v1), -v2, abs(v3) dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mixlo_bf16_e64_dpp v0, |v1|, -v2, |v3| dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x05,0x3e,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixlo_bf16 v0, abs(v1), -v2, abs(v3) op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mixlo_bf16_e64_dpp v0, |v1|, -v2, |v3| op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x0d,0x3e,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixhi_bf16 v0, abs(v1), -v2, abs(v3) dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mixhi_bf16_e64_dpp v0, |v1|, -v2, |v3| dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x05,0x3f,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+v_fma_mixhi_bf16 v0, abs(v1), -v2, abs(v3) op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4]
+// GFX1250: v_fma_mixhi_bf16_e64_dpp v0, |v1|, -v2, |v3| op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x0d,0x3f,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
+// GFX12-ERR: :[[@LINE-2]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s
new file mode 100644
index 0000000..1ea64de
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3p_err.s
@@ -0,0 +1,74 @@
+// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX12-ERR --implicit-check-not=error: --strict-whitespace %s
+
+v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], s[0:1], v[4:5]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], v[4:5], s[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5] op_sel:[1,0,0] op_sel_hi:[0,0,0]
+// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], s[0:1], v[0:1], v[4:5] op_sel:[1,0,0] op_sel_hi:[1,0,0]
+// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], s[0:1], v[4:5] op_sel:[0,1,0] op_sel_hi:[0,0,0]
+// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], v[4:5], s[0:1] op_sel:[0,0,1] op_sel_hi:[0,0,0]
+// GFX12-ERR: :[[@LINE-1]]:45: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], s[0:1], v[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], v[0:1], s[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], s[0:1], v[0:1] op_sel:[1,0] op_sel_hi:[0,0]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,0]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,1]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], s[0:1], v[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], v[0:1], s[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], s[0:1], v[0:1] op_sel:[1,0] op_sel_hi:[0,0]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,0]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], v[0:1], s[0:1] op_sel:[0,1] op_sel_hi:[0,1]
+// GFX12-ERR: :[[@LINE-1]]:37: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], exec, v[0:1], v[4:5]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], exec, v[4:5]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_fma_f32 v[8:9], v[0:1], v[4:5], exec
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], exec, v[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_mul_f32 v[8:9], v[0:1], exec
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], exec, v[0:1]
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
+
+v_pk_add_f32 v[8:9], v[0:1], exec
+// GFX12-ERR: :[[@LINE-1]]:1: error: invalid op_sel operand
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vsample_err.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vsample_err.s
new file mode 100644
index 0000000..50766f13
--- /dev/null
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vsample_err.s
@@ -0,0 +1,175 @@
+; RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefix=GFX1250-ERR --implicit-check-not=error: --strict-whitespace %s
+
+image_sample v64, v32, s[4:11], s[100:103] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_l v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_b v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_lz v64, v32, s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_l v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_b v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_lz v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_o v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_l_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_b_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_lz_o v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_o v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_l_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_b_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_lz_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4 v[64:67], [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_l v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_b v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_lz v[64:67], [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_lz v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_o v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_lz_o v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_lz_o v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_get_lod v64, v32, s[4:11], s[100:103] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_g16 v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_g16 v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_o_g16 v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_o_g16 v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_cl v64, [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_cl v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_b_cl v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_cl v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_cl v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_b_cl v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_cl_o v64, [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_cl_o v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_b_cl_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_cl_o v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_cl_o v64, [v32, v33, v34, v[35:37]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_b_cl_o v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_cl_g16 v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_cl_o_g16 v64, [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_c_d_cl_o_g16 v64, [v32, v33, v34, v[35:37]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_sample_d_cl_g16 v64, [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_1D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_cl v[64:67], [v32, v33, v34], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_b_cl v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_cl v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_l v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_b v[64:67], [v32, v33, v34, v35], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4_c_b_cl v[64:67], [v32, v33, v34, v[35:36]], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_gather4h v[64:67], [v32, v33], s[4:11], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
+
+image_msaa_load v[1:4], [v5, v6, v7], s[8:15] dmask:0x1 dim:SQ_RSRC_IMG_2D_MSAA
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_err.s b/llvm/test/MC/AMDGPU/gfx1250_err.s
index e4598fe..676eb48 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_err.s
@@ -1,5 +1,30 @@
// RUN: not llvm-mc -triple=amdgcn -mcpu=gfx1250 -show-encoding %s 2>&1 | FileCheck --check-prefixes=GFX1250-ERR --implicit-check-not=error: -strict-whitespace %s

+s_load_b32 s4, s[2:3], 10 th:TH_LOAD_NT th:TH_LOAD_NT
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR: s_load_b32 s4, s[2:3], 10 th:TH_LOAD_NT th:TH_LOAD_NT
+// GFX1250-ERR: ^
+
+s_load_b32 s4, s[2:3], 10 scope:SCOPE_SE scope:SCOPE_SE
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR: s_load_b32 s4, s[2:3], 10 scope:SCOPE_SE scope:SCOPE_SE
+// GFX1250-ERR: ^
+
+s_load_b32 s4, s[2:3], 10 nv nv
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+// GFX1250-ERR: s_load_b32 s4, s[2:3], 10 nv nv
+// GFX1250-ERR: ^
+
+v_mov_b64 v[4:5], v[2:3] quad_perm:[1,1,1,1]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR: v_mov_b64 v[4:5], v[2:3] quad_perm:[1,1,1,1]
+// GFX1250-ERR: ^
+
+v_mov_b64 v[4:5], v[2:3] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR: v_mov_b64 v[4:5], v[2:3] dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250-ERR: ^
+
// For v_dual_cndmask_b32 use of the explicit src2 forces VOPD3 form even if it is vcc_lo.
// If src2 is omitted then it forces VOPD form. As a result a proper form of the instruction
// has to be used if the other component of the dual instruction cannot be used if that
@@ -137,6 +162,11 @@ v_fmaak_f64 v[4:5], 0x7e8, v[8:9], lit64(0x7e8)
// GFX1250-ERR: v_fmaak_f64 v[4:5], 0x7e8, v[8:9], lit64(0x7e8)
// GFX1250-ERR: ^

+v_pk_add_min_i16 v10, |v1|, v2, v3
+// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+// GFX1250-ERR: v_pk_add_min_i16 v10, |v1|, v2, v3
+// GFX1250-ERR: ^
+
v_pk_add_min_i16 v10, -v1, v2, v3
// GFX1250-ERR: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
// GFX1250-ERR: v_pk_add_min_i16 v10, -v1, v2, v3
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
index 0870aa7..13440a0 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_ds.txt
@@ -1,5 +1,1109 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -strict-whitespace -check-prefix=GFX1250 %s

+# GFX1250: ds_add_f32 v1, v2 ; encoding: [0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x54,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x54,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x54,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x54,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x54,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_add_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd9,0xff,0xff,0x00,0xff]
+0x04,0x00,0xe4,0xd9,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_add_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xe4,0xd9,0x01,0x02,0x00,0x05]
+0x00,0x00,0xe4,0xd9,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_add_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd9,0x01,0x02,0x00,0x05]
+0xff,0xff,0xe4,0xd9,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_add_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x80,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_add_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x80,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_add_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x80,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_add_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x80,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x80,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x80,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_add_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x80,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x80,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_add_u32 v1, v2 ; encoding: [0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x00,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x00,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x00,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x00,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_add_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x00,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x00,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x00,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_add_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x00,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x00,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_and_b32 v1, v2 ; encoding: [0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x24,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_and_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x24,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x24,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_and_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x24,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x24,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_and_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x24,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_and_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x24,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x24,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_and_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x24,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x24,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_and_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa4,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xa4,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_and_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa4,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_and_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa4,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_and_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa4,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xa4,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xa4,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_and_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa4,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xa4,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_append v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xd8,0x00,0x00,0x00,0xff]
+0x04,0x00,0xf8,0xd8,0x00,0x00,0x00,0xff
+
+# GFX1250: ds_append v5 ; encoding: [0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05]
+0x00,0x00,0xf8,0xd8,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_append v5 offset:65535 ; encoding: [0xff,0xff,0xf8,0xd8,0x00,0x00,0x00,0x05]
+0xff,0xff,0xf8,0xd8,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_bpermute_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xcc,0xda,0xff,0xff,0x00,0xff]
+0x04,0x00,0xcc,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_bpermute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xda,0x01,0x02,0x00,0x05]
+0x00,0x00,0xcc,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_bpermute_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xcc,0xda,0x01,0x02,0x00,0x05]
+0xff,0xff,0xcc,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_cmpstore_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00]
+0x00,0x00,0x40,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_cmpstore_b32 v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0x40,0xd8,0x01,0x02,0x03,0x00]
+0xff,0xff,0x40,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_cmpstore_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x40,0xd8,0xff,0xff,0xff,0x00]
+0x04,0x00,0x40,0xd8,0xff,0xff,0xff,0x00
+
+# GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x40,0xd9,0x01,0x02,0x04,0x00]
+0x00,0x00,0x40,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_cmpstore_b64 v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0x40,0xd9,0x01,0x02,0x04,0x00]
+0xff,0xff,0x40,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_cmpstore_b64 v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0x40,0xd9,0xff,0xfe,0xfe,0x00]
+0x04,0x00,0x40,0xd9,0xff,0xfe,0xfe,0x00
+
+# GFX1250: ds_cmpstore_rtn_b32 v255, v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xd8,0xff,0xff,0xff,0xff]
+0x04,0x00,0xc0,0xd8,0xff,0xff,0xff,0xff
+
+# GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05]
+0x00,0x00,0xc0,0xd8,0x01,0x02,0x03,0x05
+
+# GFX1250: ds_cmpstore_rtn_b32 v5, v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0xc0,0xd8,0x01,0x02,0x03,0x05]
+0xff,0xff,0xc0,0xd8,0x01,0x02,0x03,0x05
+
+# GFX1250: ds_cmpstore_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0xc0,0xd9,0xff,0xfe,0xfe,0xfe]
+0x04,0x00,0xc0,0xd9,0xff,0xfe,0xfe,0xfe
+
+# GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xc0,0xd9,0x01,0x02,0x04,0x06]
+0x00,0x00,0xc0,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_cmpstore_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0xc0,0xd9,0x01,0x02,0x04,0x06]
+0xff,0xff,0xc0,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xda,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa0,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_cond_sub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa0,0xda,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa0,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_cond_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x60,0xda,0x01,0x02,0x00,0x00]
+0x00,0x00,0x60,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_cond_sub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x60,0xda,0x01,0x02,0x00,0x00]
+0xff,0xff,0x60,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_condxchg32_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xf8,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xf8,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xf8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_condxchg32_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xf8,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xf8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_consume v255 offset:4 ; encoding: [0x04,0x00,0xf4,0xd8,0x00,0x00,0x00,0xff]
+0x04,0x00,0xf4,0xd8,0x00,0x00,0x00,0xff
+
+# GFX1250: ds_consume v5 ; encoding: [0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05]
+0x00,0x00,0xf4,0xd8,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_consume v5 offset:65535 ; encoding: [0xff,0xff,0xf4,0xd8,0x00,0x00,0x00,0x05]
+0xff,0xff,0xf4,0xd8,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_dec_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x90,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_dec_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x90,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_dec_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x90,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_dec_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x90,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x90,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x90,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_dec_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x90,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x90,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_dec_u32 v1, v2 ; encoding: [0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x10,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_dec_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x10,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x10,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_dec_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x10,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x10,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_dec_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x10,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_dec_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x10,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x10,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_dec_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x10,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x10,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_inc_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x8c,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_inc_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x8c,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_inc_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x8c,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_inc_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x8c,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x8c,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x8c,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_inc_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x8c,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x8c,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_inc_u32 v1, v2 ; encoding: [0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x0c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_inc_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x0c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x0c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_inc_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x0c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x0c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_inc_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x0c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_inc_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x0c,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x0c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_inc_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x0c,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x0c,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_load_2addr_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd8,0xff,0x00,0x00,0xfe]
+0x10,0x01,0xdc,0xd8,0xff,0x00,0x00,0xfe
+
+# GFX1250: ds_load_2addr_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06]
+0x00,0x00,0xdc,0xd8,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x06]
+0x7f,0xff,0xdc,0xd8,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xdc,0xd9,0xff,0x00,0x00,0xfc]
+0x10,0x01,0xdc,0xd9,0xff,0x00,0x00,0xfc
+
+# GFX1250: ds_load_2addr_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06]
+0x00,0x00,0xdc,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x06]
+0x7f,0xff,0xdc,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_stride64_b32 v[254:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd8,0xff,0x00,0x00,0xfe]
+0x10,0x01,0xe0,0xd8,0xff,0x00,0x00,0xfe
+
+# GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 ; encoding: [0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06]
+0x00,0x00,0xe0,0xd8,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_stride64_b32 v[6:7], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x06]
+0x7f,0xff,0xe0,0xd8,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_stride64_b64 v[252:255], v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xe0,0xd9,0xff,0x00,0x00,0xfc]
+0x10,0x01,0xe0,0xd9,0xff,0x00,0x00,0xfc
+
+# GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 ; encoding: [0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06]
+0x00,0x00,0xe0,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_2addr_stride64_b64 v[6:9], v1 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x06]
+0x7f,0xff,0xe0,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc4,0xda,0x00,0x00,0x00,0xff]
+0x04,0x00,0xc4,0xda,0x00,0x00,0x00,0xff
+
+# GFX1250: ds_load_addtid_b32 v5 ; encoding: [0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05]
+0x00,0x00,0xc4,0xda,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_load_addtid_b32 v5 offset:65535 ; encoding: [0xff,0xff,0xc4,0xda,0x00,0x00,0x00,0x05]
+0xff,0xff,0xc4,0xda,0x00,0x00,0x00,0x05
+
+# GFX1250: ds_load_b128 v[252:255], v255 offset:4 ; encoding: [0x04,0x00,0xfc,0xdb,0xff,0x00,0x00,0xfc]
+0x04,0x00,0xfc,0xdb,0xff,0x00,0x00,0xfc
+
+# GFX1250: ds_load_b128 v[6:9], v1 ; encoding: [0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06]
+0x00,0x00,0xfc,0xdb,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b128 v[6:9], v1 offset:65535 ; encoding: [0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x06]
+0xff,0xff,0xfc,0xdb,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xd8,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_b32 v5, v1 ; encoding: [0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xd8,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_b32 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xd8,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_b64 v[254:255], v255 offset:4 ; encoding: [0x04,0x00,0xd8,0xd9,0xff,0x00,0x00,0xfe]
+0x04,0x00,0xd8,0xd9,0xff,0x00,0x00,0xfe
+
+# GFX1250: ds_load_b64 v[6:7], v1 ; encoding: [0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06]
+0x00,0x00,0xd8,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b64 v[6:7], v1 offset:65535 ; encoding: [0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x06]
+0xff,0xff,0xd8,0xd9,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b96 v[252:254], v255 offset:4 ; encoding: [0x04,0x00,0xf8,0xdb,0xff,0x00,0x00,0xfc]
+0x04,0x00,0xf8,0xdb,0xff,0x00,0x00,0xfc
+
+# GFX1250: ds_load_b96 v[6:8], v1 ; encoding: [0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06]
+0x00,0x00,0xf8,0xdb,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_b96 v[6:8], v1 offset:65535 ; encoding: [0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x06]
+0xff,0xff,0xf8,0xdb,0x01,0x00,0x00,0x06
+
+# GFX1250: ds_load_i16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xec,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xec,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_i16 v5, v1 ; encoding: [0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xec,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xec,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe4,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xe4,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_i8 v5, v1 ; encoding: [0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xe4,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xe4,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x90,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x90,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_i8_d16 v5, v1 ; encoding: [0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x90,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x90,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x90,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x94,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_i8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x94,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_i8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x94,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x94,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16 v255, v255 offset:4 ; encoding: [0x04,0x00,0xf0,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xf0,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u16 v5, v1 ; encoding: [0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xf0,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xf0,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x98,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u16_d16 v5, v1 ; encoding: [0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x98,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x98,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x98,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x9c,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u16_d16_hi v5, v1 ; encoding: [0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x9c,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u16_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x9c,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x9c,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8 v255, v255 offset:4 ; encoding: [0x04,0x00,0xe8,0xd8,0xff,0x00,0x00,0xff]
+0x04,0x00,0xe8,0xd8,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u8 v5, v1 ; encoding: [0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05]
+0x00,0x00,0xe8,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8 v5, v1 offset:65535 ; encoding: [0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05]
+0xff,0xff,0xe8,0xd8,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8_d16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x88,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u8_d16 v5, v1 ; encoding: [0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x88,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8_d16 v5, v1 offset:65535 ; encoding: [0xff,0xff,0x88,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x88,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x8c,0xda,0xff,0x00,0x00,0xff]
+0x04,0x00,0x8c,0xda,0xff,0x00,0x00,0xff
+
+# GFX1250: ds_load_u8_d16_hi v5, v1 ; encoding: [0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05]
+0x00,0x00,0x8c,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_load_u8_d16_hi v5, v1 offset:65535 ; encoding: [0xff,0xff,0x8c,0xda,0x01,0x00,0x00,0x05]
+0xff,0xff,0x8c,0xda,0x01,0x00,0x00,0x05
+
+# GFX1250: ds_max_i32 v1, v2 ; encoding: [0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x18,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_i32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x18,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x18,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_i32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x18,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x18,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_max_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x18,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_i64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x18,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x18,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_i64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x18,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x18,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_max_num_f32 v1, v2 ; encoding: [0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x4c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_num_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x4c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_num_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x4c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x4c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_max_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x4c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_num_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x4c,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x4c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_num_f64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x4c,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x4c,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_max_num_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xcc,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xcc,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_max_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xcc,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_num_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xcc,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_num_rtn_f64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xcc,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xcc,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xcc,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xcc,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xcc,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_rtn_i32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x98,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x98,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_max_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x98,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_rtn_i32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x98,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x98,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_rtn_i64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x98,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x98,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x98,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_rtn_i64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x98,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x98,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa0,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xa0,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_max_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa0,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa0,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa0,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_max_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa0,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xa0,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xa0,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa0,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xa0,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_max_u32 v1, v2 ; encoding: [0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x20,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x20,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x20,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x20,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x20,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_max_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x20,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x20,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x20,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_max_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x20,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x20,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_min_i32 v1, v2 ; encoding: [0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x14,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_i32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x14,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x14,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_i32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x14,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x14,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_min_i64 v1, v[2:3] ; encoding: [0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x14,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_i64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x14,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x14,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_i64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x14,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x14,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_min_num_f32 v1, v2 ; encoding: [0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x48,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_num_f32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x48,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_num_f32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x48,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x48,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_min_num_f64 v1, v[2:3] ; encoding: [0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x48,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_num_f64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x48,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x48,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_num_f64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x48,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x48,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_min_num_rtn_f32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc8,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xc8,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_min_num_rtn_f32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xc8,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_num_rtn_f32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xc8,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_num_rtn_f64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xc8,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xc8,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xc8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_num_rtn_f64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xc8,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xc8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_rtn_i32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x94,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x94,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_min_rtn_i32 v5, v1, v2 ; encoding: [0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x94,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_rtn_i32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x94,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_rtn_i64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x94,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x94,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x94,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_rtn_i64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x94,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x94,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x9c,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x9c,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_min_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x9c,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x9c,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x9c,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_min_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x9c,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x9c,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x9c,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x9c,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x9c,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_min_u32 v1, v2 ; encoding: [0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x1c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x1c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x1c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x1c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x1c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_min_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x1c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x1c,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x1c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_min_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x1c,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x1c,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_mskor_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00]
+0x00,0x00,0x30,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_mskor_b32 v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0x30,0xd8,0x01,0x02,0x03,0x00]
+0xff,0xff,0x30,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_mskor_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x30,0xd8,0xff,0xff,0xff,0x00]
+0x04,0x00,0x30,0xd8,0xff,0xff,0xff,0x00
+
+# GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x30,0xd9,0x01,0x02,0x04,0x00]
+0x00,0x00,0x30,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_mskor_b64 v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0x30,0xd9,0x01,0x02,0x04,0x00]
+0xff,0xff,0x30,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_mskor_b64 v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0x30,0xd9,0xff,0xfe,0xfe,0x00]
+0x04,0x00,0x30,0xd9,0xff,0xfe,0xfe,0x00
+
+# GFX1250: ds_mskor_rtn_b32 v255, v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb0,0xd8,0xff,0xff,0xff,0xff]
+0x04,0x00,0xb0,0xd8,0xff,0xff,0xff,0xff
+
+# GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 ; encoding: [0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05]
+0x00,0x00,0xb0,0xd8,0x01,0x02,0x03,0x05
+
+# GFX1250: ds_mskor_rtn_b32 v5, v1, v2, v3 offset:65535 ; encoding: [0xff,0xff,0xb0,0xd8,0x01,0x02,0x03,0x05]
+0xff,0xff,0xb0,0xd8,0x01,0x02,0x03,0x05
+
+# GFX1250: ds_mskor_rtn_b64 v[254:255], v255, v[254:255], v[254:255] offset:4 ; encoding: [0x04,0x00,0xb0,0xd9,0xff,0xfe,0xfe,0xfe]
+0x04,0x00,0xb0,0xd9,0xff,0xfe,0xfe,0xfe
+
+# GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb0,0xd9,0x01,0x02,0x04,0x06]
+0x00,0x00,0xb0,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_mskor_rtn_b64 v[6:7], v1, v[2:3], v[4:5] offset:65535 ; encoding: [0xff,0xff,0xb0,0xd9,0x01,0x02,0x04,0x06]
+0xff,0xff,0xb0,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_nop ; encoding: [0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00]
+0x00,0x00,0x50,0xd8,0x00,0x00,0x00,0x00
+
+# GFX1250: ds_or_b32 v1, v2 ; encoding: [0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x28,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_or_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x28,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x28,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_or_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x28,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x28,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_or_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x28,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_or_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x28,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x28,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_or_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x28,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x28,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_or_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa8,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xa8,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_or_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa8,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_or_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa8,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa8,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_or_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xa8,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xa8,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xa8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_or_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xa8,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xa8,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_permute_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xc8,0xda,0xff,0xff,0x00,0xff]
+0x04,0x00,0xc8,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_permute_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xc8,0xda,0x01,0x02,0x00,0x05]
+0x00,0x00,0xc8,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_permute_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xc8,0xda,0x01,0x02,0x00,0x05]
+0xff,0xff,0xc8,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_pk_add_bf16 v0, v0 ; encoding: [0x00,0x00,0x6c,0xda,0x00,0x00,0x00,0x00]
+0x00,0x00,0x6c,0xda,0x00,0x00,0x00,0x00
+
+# GFX1250: ds_pk_add_bf16 v0, v0 offset:65535 ; encoding: [0xff,0xff,0x6c,0xda,0x00,0x00,0x00,0x00]
+0xff,0xff,0x6c,0xda,0x00,0x00,0x00,0x00
+
+# GFX1250: ds_pk_add_bf16 v2, v1 ; encoding: [0x00,0x00,0x6c,0xda,0x02,0x01,0x00,0x00]
+0x00,0x00,0x6c,0xda,0x02,0x01,0x00,0x00
+
+# GFX1250: ds_pk_add_bf16 v255, v255 ; encoding: [0x00,0x00,0x6c,0xda,0xff,0xff,0x00,0x00]
+0x00,0x00,0x6c,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_bf16 v255, v255 offset:4660 ; encoding: [0x34,0x12,0x6c,0xda,0xff,0xff,0x00,0x00]
+0x34,0x12,0x6c,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v0, v0 ; encoding: [0x00,0x00,0x68,0xda,0x00,0x00,0x00,0x00]
+0x00,0x00,0x68,0xda,0x00,0x00,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v2, v1 ; encoding: [0x00,0x00,0x68,0xda,0x02,0x01,0x00,0x00]
+0x00,0x00,0x68,0xda,0x02,0x01,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v2, v1 offset:4660 ; encoding: [0x34,0x12,0x68,0xda,0x02,0x01,0x00,0x00]
+0x34,0x12,0x68,0xda,0x02,0x01,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v2, v1 offset:65535 ; encoding: [0xff,0xff,0x68,0xda,0x02,0x01,0x00,0x00]
+0xff,0xff,0x68,0xda,0x02,0x01,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v255, v255 ; encoding: [0x00,0x00,0x68,0xda,0xff,0xff,0x00,0x00]
+0x00,0x00,0x68,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v255, v255 offset:4660 ; encoding: [0x34,0x12,0x68,0xda,0xff,0xff,0x00,0x00]
+0x34,0x12,0x68,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_f16 v255, v255 offset:65535 ; encoding: [0xff,0xff,0x68,0xda,0xff,0xff,0x00,0x00]
+0xff,0xff,0x68,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_pk_add_rtn_bf16 v255, v0, v200 ; encoding: [0x00,0x00,0xac,0xda,0x00,0xc8,0x00,0xff]
+0x00,0x00,0xac,0xda,0x00,0xc8,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_bf16 v255, v255, v255 ; encoding: [0x00,0x00,0xac,0xda,0xff,0xff,0x00,0xff]
+0x00,0x00,0xac,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_bf16 v255, v255, v255 offset:65535 ; encoding: [0xff,0xff,0xac,0xda,0xff,0xff,0x00,0xff]
+0xff,0xff,0xac,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_bf16 v3, v2, v1 ; encoding: [0x00,0x00,0xac,0xda,0x02,0x01,0x00,0x03]
+0x00,0x00,0xac,0xda,0x02,0x01,0x00,0x03
+
+# GFX1250: ds_pk_add_rtn_bf16 v3, v2, v1 offset:4660 ; encoding: [0x34,0x12,0xac,0xda,0x02,0x01,0x00,0x03]
+0x34,0x12,0xac,0xda,0x02,0x01,0x00,0x03
+
+# GFX1250: ds_pk_add_rtn_f16 v255, v0, v200 ; encoding: [0x00,0x00,0xa8,0xda,0x00,0xc8,0x00,0xff]
+0x00,0x00,0xa8,0xda,0x00,0xc8,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_f16 v255, v0, v200 offset:65535 ; encoding: [0xff,0xff,0xa8,0xda,0x00,0xc8,0x00,0xff]
+0xff,0xff,0xa8,0xda,0x00,0xc8,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_f16 v255, v255, v255 ; encoding: [0x00,0x00,0xa8,0xda,0xff,0xff,0x00,0xff]
+0x00,0x00,0xa8,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_pk_add_rtn_f16 v3, v2, v1 ; encoding: [0x00,0x00,0xa8,0xda,0x02,0x01,0x00,0x03]
+0x00,0x00,0xa8,0xda,0x02,0x01,0x00,0x03
+
+# GFX1250: ds_pk_add_rtn_f16 v3, v2, v1 offset:4660 ; encoding: [0x34,0x12,0xa8,0xda,0x02,0x01,0x00,0x03]
+0x34,0x12,0xa8,0xda,0x02,0x01,0x00,0x03
+
+# GFX1250: ds_rsub_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x88,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x88,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_rsub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x88,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_rsub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x88,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_rsub_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x88,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x88,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x88,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_rsub_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x88,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x88,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_rsub_u32 v1, v2 ; encoding: [0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x08,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_rsub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x08,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x08,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_rsub_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x08,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x08,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_rsub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x08,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_rsub_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x08,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x08,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_rsub_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x08,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x08,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_store_2addr_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00]
+0x00,0x00,0x38,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_store_2addr_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00]
+0x7f,0xff,0x38,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_store_2addr_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd8,0xff,0xff,0xff,0x00]
+0x10,0x01,0x38,0xd8,0xff,0xff,0xff,0x00
+
+# GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00]
+0x00,0x00,0x38,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_store_2addr_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x38,0xd9,0x01,0x02,0x04,0x00]
+0x7f,0xff,0x38,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_store_2addr_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x38,0xd9,0xff,0xfe,0xfe,0x00]
+0x10,0x01,0x38,0xd9,0xff,0xfe,0xfe,0x00
+
+# GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 ; encoding: [0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00]
+0x00,0x00,0x3c,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_store_2addr_stride64_b32 v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00]
+0x7f,0xff,0x3c,0xd8,0x01,0x02,0x03,0x00
+
+# GFX1250: ds_store_2addr_stride64_b32 v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd8,0xff,0xff,0xff,0x00]
+0x10,0x01,0x3c,0xd8,0xff,0xff,0xff,0x00
+
+# GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00]
+0x00,0x00,0x3c,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_store_2addr_stride64_b64 v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0x3c,0xd9,0x01,0x02,0x04,0x00]
+0x7f,0xff,0x3c,0xd9,0x01,0x02,0x04,0x00
+
+# GFX1250: ds_store_2addr_stride64_b64 v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0x3c,0xd9,0xff,0xfe,0xfe,0x00]
+0x10,0x01,0x3c,0xd9,0xff,0xfe,0xfe,0x00
+
+# GFX1250: ds_store_addtid_b32 v1 ; encoding: [0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00]
+0x00,0x00,0xc0,0xda,0x00,0x01,0x00,0x00
+
+# GFX1250: ds_store_addtid_b32 v1 offset:65535 ; encoding: [0xff,0xff,0xc0,0xda,0x00,0x01,0x00,0x00]
+0xff,0xff,0xc0,0xda,0x00,0x01,0x00,0x00
+
+# GFX1250: ds_store_addtid_b32 v255 offset:4 ; encoding: [0x04,0x00,0xc0,0xda,0x00,0xff,0x00,0x00]
+0x04,0x00,0xc0,0xda,0x00,0xff,0x00,0x00
+
+# GFX1250: ds_store_b128 v1, v[2:5] ; encoding: [0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00]
+0x00,0x00,0x7c,0xdb,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b128 v1, v[2:5] offset:65535 ; encoding: [0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00]
+0xff,0xff,0x7c,0xdb,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b128 v255, v[252:255] offset:4 ; encoding: [0x04,0x00,0x7c,0xdb,0xff,0xfc,0x00,0x00]
+0x04,0x00,0x7c,0xdb,0xff,0xfc,0x00,0x00
+
+# GFX1250: ds_store_b16 v1, v2 ; encoding: [0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x7c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b16 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x7c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b16 v255, v255 offset:4 ; encoding: [0x04,0x00,0x7c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x7c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b16_d16_hi v1, v2 ; encoding: [0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00]
+0x00,0x00,0x84,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b16_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xda,0x01,0x02,0x00,0x00]
+0xff,0xff,0x84,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b16_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xda,0xff,0xff,0x00,0x00]
+0x04,0x00,0x84,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b32 v1, v2 ; encoding: [0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x34,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x34,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x34,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x34,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x34,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x34,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x34,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x34,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_store_b8 v1, v2 ; encoding: [0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x78,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b8 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x78,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b8 v255, v255 offset:4 ; encoding: [0x04,0x00,0x78,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x78,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b8_d16_hi v1, v2 ; encoding: [0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00]
+0x00,0x00,0x80,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b8_d16_hi v1, v2 offset:65535 ; encoding: [0xff,0xff,0x80,0xda,0x01,0x02,0x00,0x00]
+0xff,0xff,0x80,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b8_d16_hi v255, v255 offset:4 ; encoding: [0x04,0x00,0x80,0xda,0xff,0xff,0x00,0x00]
+0x04,0x00,0x80,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_store_b96 v1, v[2:4] ; encoding: [0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00]
+0x00,0x00,0x78,0xdb,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b96 v1, v[2:4] offset:65535 ; encoding: [0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00]
+0xff,0xff,0x78,0xdb,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_store_b96 v255, v[252:254] offset:4 ; encoding: [0x04,0x00,0x78,0xdb,0xff,0xfc,0x00,0x00]
+0x04,0x00,0x78,0xdb,0xff,0xfc,0x00,0x00
+
+# GFX1250: ds_storexchg_2addr_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd8,0xff,0xff,0xff,0xfe]
+0x10,0x01,0xb8,0xd8,0xff,0xff,0xff,0xfe
+
+# GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06]
+0x00,0x00,0xb8,0xd8,0x01,0x02,0x03,0x06
+
+# GFX1250: ds_storexchg_2addr_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd8,0x01,0x02,0x03,0x06]
+0x7f,0xff,0xb8,0xd8,0x01,0x02,0x03,0x06
+
+# GFX1250: ds_storexchg_2addr_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xb8,0xd9,0xff,0xfe,0xfe,0xfc]
+0x10,0x01,0xb8,0xd9,0xff,0xfe,0xfe,0xfc
+
+# GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06]
+0x00,0x00,0xb8,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_storexchg_2addr_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xb8,0xd9,0x01,0x02,0x04,0x06]
+0x7f,0xff,0xb8,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[254:255], v255, v255, v255 offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd8,0xff,0xff,0xff,0xfe]
+0x10,0x01,0xbc,0xd8,0xff,0xff,0xff,0xfe
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 ; encoding: [0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06]
+0x00,0x00,0xbc,0xd8,0x01,0x02,0x03,0x06
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b32 v[6:7], v1, v2, v3 offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd8,0x01,0x02,0x03,0x06]
+0x7f,0xff,0xbc,0xd8,0x01,0x02,0x03,0x06
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[252:255], v255, v[254:255], v[254:255] offset0:16 offset1:1 ; encoding: [0x10,0x01,0xbc,0xd9,0xff,0xfe,0xfe,0xfc]
+0x10,0x01,0xbc,0xd9,0xff,0xfe,0xfe,0xfc
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] ; encoding: [0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06]
+0x00,0x00,0xbc,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_storexchg_2addr_stride64_rtn_b64 v[6:9], v1, v[2:3], v[4:5] offset0:127 offset1:255 ; encoding: [0x7f,0xff,0xbc,0xd9,0x01,0x02,0x04,0x06]
+0x7f,0xff,0xbc,0xd9,0x01,0x02,0x04,0x06
+
+# GFX1250: ds_storexchg_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xb4,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xb4,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xb4,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_storexchg_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xb4,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_storexchg_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xb4,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xb4,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xb4,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_storexchg_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xb4,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_sub_clamp_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xa4,0xda,0xff,0xff,0x00,0xff]
+0x04,0x00,0xa4,0xda,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0xa4,0xda,0x01,0x02,0x00,0x05]
+0x00,0x00,0xa4,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_sub_clamp_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xa4,0xda,0x01,0x02,0x00,0x05]
+0xff,0xff,0xa4,0xda,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_sub_clamp_u32 v1, v2 ; encoding: [0x00,0x00,0x64,0xda,0x01,0x02,0x00,0x00]
+0x00,0x00,0x64,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_clamp_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x64,0xda,0x01,0x02,0x00,0x00]
+0xff,0xff,0x64,0xda,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_clamp_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x64,0xda,0xff,0xff,0x00,0x00]
+0x04,0x00,0x64,0xda,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_sub_rtn_u32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0x84,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0x84,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_sub_rtn_u32 v5, v1, v2 ; encoding: [0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0x84,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_sub_rtn_u32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0x84,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_sub_rtn_u64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x84,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0x84,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0x84,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_sub_rtn_u64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x84,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0x84,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_sub_u32 v1, v2 ; encoding: [0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x04,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_u32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x04,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x04,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_u32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x04,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x04,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_sub_u64 v1, v[2:3] ; encoding: [0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x04,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_u64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x04,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x04,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_sub_u64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x04,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x04,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_swizzle_b32 v8, v2 ; encoding: [0x00,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x00,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(FFT,31) ; encoding: [0xff,0xff,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0xff,0xff,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BITMASK_PERM,"01pip") ; encoding: [0x07,0x09,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x07,0x09,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,4,1) ; encoding: [0x3c,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x3c,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(BROADCAST,8,7) ; encoding: [0xf8,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0xf8,0x00,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(QUAD_PERM,0,1,2,3) ; encoding: [0xe4,0x80,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0xe4,0x80,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(REVERSE,8) ; encoding: [0x1f,0x1c,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x1f,0x1c,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_swizzle_b32 v8, v2 offset:swizzle(SWAP,16) ; encoding: [0x1f,0x40,0xd4,0xd8,0x02,0x00,0x00,0x08]
+0x1f,0x40,0xd4,0xd8,0x02,0x00,0x00,0x08
+
+# GFX1250: ds_xor_b32 v1, v2 ; encoding: [0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00]
+0x00,0x00,0x2c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_xor_b32 v1, v2 offset:65535 ; encoding: [0xff,0xff,0x2c,0xd8,0x01,0x02,0x00,0x00]
+0xff,0xff,0x2c,0xd8,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_xor_b32 v255, v255 offset:4 ; encoding: [0x04,0x00,0x2c,0xd8,0xff,0xff,0x00,0x00]
+0x04,0x00,0x2c,0xd8,0xff,0xff,0x00,0x00
+
+# GFX1250: ds_xor_b64 v1, v[2:3] ; encoding: [0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00]
+0x00,0x00,0x2c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_xor_b64 v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0x2c,0xd9,0x01,0x02,0x00,0x00]
+0xff,0xff,0x2c,0xd9,0x01,0x02,0x00,0x00
+
+# GFX1250: ds_xor_b64 v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0x2c,0xd9,0xff,0xfe,0x00,0x00]
+0x04,0x00,0x2c,0xd9,0xff,0xfe,0x00,0x00
+
+# GFX1250: ds_xor_rtn_b32 v255, v255, v255 offset:4 ; encoding: [0x04,0x00,0xac,0xd8,0xff,0xff,0x00,0xff]
+0x04,0x00,0xac,0xd8,0xff,0xff,0x00,0xff
+
+# GFX1250: ds_xor_rtn_b32 v5, v1, v2 ; encoding: [0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05]
+0x00,0x00,0xac,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_xor_rtn_b32 v5, v1, v2 offset:65535 ; encoding: [0xff,0xff,0xac,0xd8,0x01,0x02,0x00,0x05]
+0xff,0xff,0xac,0xd8,0x01,0x02,0x00,0x05
+
+# GFX1250: ds_xor_rtn_b64 v[254:255], v255, v[254:255] offset:4 ; encoding: [0x04,0x00,0xac,0xd9,0xff,0xfe,0x00,0xfe]
+0x04,0x00,0xac,0xd9,0xff,0xfe,0x00,0xfe
+
+# GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] ; encoding: [0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x06]
+0x00,0x00,0xac,0xd9,0x01,0x02,0x00,0x06
+
+# GFX1250: ds_xor_rtn_b64 v[6:7], v1, v[2:3] offset:65535 ; encoding: [0xff,0xff,0xac,0xd9,0x01,0x02,0x00,0x06]
+0xff,0xff,0xac,0xd9,0x01,0x02,0x00,0x06
+
# GFX1250: ds_atomic_async_barrier_arrive_b64 v1 offset:65407 ; encoding: [0x7f,0xff,0x58,0xd9,0x01,0x00,0x00,0x00]
0x7f,0xff,0x58,0xd9,0x01,0x00,0x00,0x00
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt
index a3e7e57..d72009b 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_operands.txt
@@ -20,3 +20,15 @@

# GFX1250: s_mov_b64 s[0:1], src_shared_limit ; encoding: [0xec,0x01,0x80,0xbe]
0xec,0x01,0x80,0xbe
+
+# GFX1250: s_getreg_b32 s1, hwreg(HW_REG_XNACK_STATE_PRIV) ; encoding: [0x21,0xf8,0x81,0xb8]
+0x21,0xf8,0x81,0xb8
+
+# GFX1250: s_getreg_b32 s1, hwreg(HW_REG_XNACK_MASK) ; encoding: [0x22,0xf8,0x81,0xb8]
+0x22,0xf8,0x81,0xb8
+
+# GFX1250: s_setreg_b32 hwreg(HW_REG_XNACK_STATE_PRIV), s1 ; encoding: [0x21,0xf8,0x01,0xb9]
+0x21,0xf8,0x01,0xb9
+
+# GFX1250: s_setreg_b32 hwreg(HW_REG_XNACK_MASK), s1 ; encoding: [0x22,0xf8,0x01,0xb9]
+0x22,0xf8,0x01,0xb9
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt
index 83fa647..07aca1e 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_sop1.txt
@@ -12,6 +12,9 @@

# GFX1250: s_add_pc_i64 s[2:3] ; encoding: [0x02,0x4b,0x80,0xbe]
0x02,0x4b,0x80,0xbe

+# GFX1250: s_get_shader_cycles_u64 s[2:3] ; encoding: [0x00,0x06,0x82,0xbe]
+0x00,0x06,0x82,0xbe
+
# GFX1250: s_barrier_signal -3 ; encoding: [0xc3,0x4e,0x80,0xbe]
0xc3,0x4e,0x80,0xbe
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
index 2499225..ddf779a 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vbuffer_mubuf.txt
@@ -1,5 +1,2138 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+# GFX1250: buffer_atomic_add_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x15,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_add_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_and_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b32 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0d,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[252:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cmpswap_b64 v[6:9], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_cond_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x10,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_dec_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_inc_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x13,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_num_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_max_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_i64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_num_f32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x14,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0e,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_min_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_or_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_bf16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_pk_add_f16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x16,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0xff,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0xff,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v255, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0xff,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0xff,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x18,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[12:15], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x18,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x18,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], m0 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s101 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x65,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:7 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[8:11], s3 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0xc0,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, off, s[96:99], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0xc0,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 idxen offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0xe8,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x90,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_clamp_u32 v5, v0, s[8:11], s3 offen offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0d,0xc4,0x05,0x10,0x94,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0d,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_sub_u64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x11,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x0c,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_swap_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x10,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x0f,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_CASCADE_NT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x90,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_ATOMIC_RETURN scope:SCOPE_SE ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x94,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_atomic_xor_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x12,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[252:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b128 v[6:9], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b32 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x05,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[254:255], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0xfe,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b64 v[6:7], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[252:254], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0xfc,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_b96 v[6:8], v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x05,0xc4,0x06,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_b16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_b16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_hi_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x08,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_d16_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x07,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0xc0,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_i8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x40,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u16 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x80,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v255, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0xff,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[12:15], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], m0 offset:8388607 ; encoding: [0x7d,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s101 offset:8388607 ; encoding: [0x65,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:7 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_BYPASS scope:SCOPE_SYS ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[8:11], s3 offset:8388607 th:TH_LOAD_NT_HT scope:SCOPE_DEV ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, off, s[96:99], s3 offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, v0, s[8:11], s3 idxen offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_load_u8 v5, v0, s[8:11], s3 offen offset:8388607 ; encoding: [0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x03,0x00,0x04,0xc4,0x05,0x10,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[252:255], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b128 v[2:5], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b16 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b32 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x80,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x80,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[254:255], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0xfe,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0xfe,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:7 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b64 v[2:3], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0xc0,0x06,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b8 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x06,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[252:254], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0xfc,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_b96 v[2:4], v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x07,0xc4,0x02,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b16 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x40,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x40,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], m0 offset:8388607 ; encoding: [0x7d,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x7d,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s101 offset:8388607 ; encoding: [0x65,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x65,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x00,0x00,0x00
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:7 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0x07,0x00,0x00
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_BYPASS scope:SCOPE_SYS ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0xbc,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[12:15], s4 offset:8388607 th:TH_STORE_NT_HT scope:SCOPE_DEV ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0xe8,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[16:19], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x20,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, off, s[96:99], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0xc0,0x80,0x00,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 idxen offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x80,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v1, v0, s[12:15], s4 offen offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0x01,0x18,0x80,0x40,0x00,0xff,0xff,0x7f
+
+# GFX1250: buffer_store_d16_hi_b8 v255, off, s[12:15], s4 offset:8388607 ; encoding: [0x04,0x00,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f]
+0x04,0x00,0x09,0xc4,0xff,0x18,0x80,0x00,0x00,0xff,0xff,0x7f
+
# GFX1250: buffer_atomic_and_b32 v5, v1, s[8:11], s3 offen offset:4095 nv ; encoding: [0x83,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x01,0xff,0x0f,0x00]
0x83,0x00,0x0f,0xc4,0x05,0x10,0x80,0x40,0x01,0xff,0x0f,0x00
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3cx.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3cx.txt
new file mode 100644
index 0000000..e419e4583
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3cx.txt
@@ -0,0 +1,3413 @@
+# NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+0x7e,0x00,0xfd,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xfd,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x01,0xfd,0xd4,0xff,0xd6,0x00,0x20,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 -|0xfe0b|, vcc_hi ; encoding: [0x7e,0x01,0xfd,0xd4,0xff,0xd6,0x00,0x20,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 0.5, m0 ; encoding: [0x7e,0x00,0xfd,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xfd,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_class_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xfd,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7d,0xfa,0x01,0x00
+# GFX1250: v_cmpx_class_f16_e64 m0, src_scc ; encoding: [0x7e,0x00,0xfd,0xd4,0x7d,0xfa,0x01,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xfd,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x01,0x04,0x02,0x00
+# GFX1250: v_cmpx_class_f16_e64 s1, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x01,0x04,0x02,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x69,0xfe,0x03,0x00
+# GFX1250: v_cmpx_class_f16_e64 s105, v255 ; encoding: [0x7e,0x00,0xfd,0xd4,0x69,0xfe,0x03,0x00]
+
+0x7e,0x00,0xfd,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xfd,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x7b,0xf6,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 ttmp15, ttmp15 ; encoding: [0x7e,0x00,0xfd,0xd4,0x7b,0xf6,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_class_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xfd,0xd4,0xff,0x05,0x02,0x00
+# GFX1250: v_cmpx_class_f16_e64 v255, v2 ; encoding: [0x7e,0x00,0xfd,0xd4,0xff,0x05,0x02,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x6b,0xd2,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 vcc_hi, s105 ; encoding: [0x7e,0x00,0xfd,0xd4,0x6b,0xd2,0x00,0x00]
+
+0x7e,0x00,0xfd,0xd4,0x6a,0x04,0x00,0x00
+# GFX1250: v_cmpx_class_f16_e64 vcc_lo, s2 ; encoding: [0x7e,0x00,0xfd,0xd4,0x6a,0x04,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xfe,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x01,0xfe,0xd4,0xff,0xd6,0x00,0x20,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_class_f32_e64 -|0xaf123456|, vcc_hi ; encoding: [0x7e,0x01,0xfe,0xd4,0xff,0xd6,0x00,0x20,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xfe,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xfe,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xfe,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_class_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xfe,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_class_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xfe,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xfe,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0xfe,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0xfe,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xfe,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_class_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xfe,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_class_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0xfe,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xfe,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_class_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0xfe,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xfe,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_class_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xfe,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xfe,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_class_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xfe,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_class_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xff,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xff,0xd4,0xfd,0xfa,0x01,0x20
+# GFX1250: v_cmpx_class_f64_e64 -|src_scc|, src_scc ; encoding: [0x7e,0x01,0xff,0xd4,0xfd,0xfa,0x01,0x20]
+
+0x7e,0x00,0xff,0xd4,0xf0,0xe0,0x01,0x00
+# GFX1250: v_cmpx_class_f64_e64 0.5, 0.5 ; encoding: [0x7e,0x00,0xff,0xd4,0xf0,0xe0,0x01,0x00]
+
+0x7e,0x00,0xff,0xd4,0xff,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_class_f64_e64 0xaf123456, 0xaf123456 ; encoding: [0x7e,0x00,0xff,0xd4,0xff,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xff,0xd4,0x7e,0xfc,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 exec, exec_lo ; encoding: [0x7e,0x00,0xff,0xd4,0x7e,0xfc,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x7c,0xf8,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 null, null ; encoding: [0x7e,0x00,0xff,0xd4,0x7c,0xf8,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x68,0xd4,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 s[104:105], vcc_lo ; encoding: [0x7e,0x00,0xff,0xd4,0x68,0xd4,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0xd6,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 s[2:3], vcc_hi ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xd6,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x7a,0xfe,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 ttmp[14:15], exec_hi ; encoding: [0x7e,0x00,0xff,0xd4,0x7a,0xfe,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0xfe,0xf7,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[254:255], ttmp15 ; encoding: [0x7e,0x00,0xff,0xd4,0xfe,0xf7,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0xd3,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[2:3], s105 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xd3,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0x05,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[2:3], s2 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0x05,0x00,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[2:3], v2 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xff,0xd4,0x02,0xff,0x03,0x00
+# GFX1250: v_cmpx_class_f64_e64 v[2:3], v255 ; encoding: [0x7e,0x00,0xff,0xd4,0x02,0xff,0x03,0x00]
+
+0x7e,0x00,0xff,0xd4,0x6a,0xfa,0x00,0x00
+# GFX1250: v_cmpx_class_f64_e64 vcc, m0 ; encoding: [0x7e,0x00,0xff,0xd4,0x6a,0xfa,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x82,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x82,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_eq_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x82,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x82,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x82,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_eq_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x82,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x82,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x82,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x82,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x82,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x82,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x82,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x82,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x82,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x82,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x82,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x82,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x82,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x82,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x82,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x82,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x82,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x82,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x82,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x82,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x92,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x92,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x92,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_eq_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x92,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x92,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x92,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x92,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_eq_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x92,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x92,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x92,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x92,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x92,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x92,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x92,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x92,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x92,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x92,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x92,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x92,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x92,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x92,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x92,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x92,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x92,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x92,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x92,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x92,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x92,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x92,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x92,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa2,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa2,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa2,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_eq_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa2,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa2,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_eq_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa2,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa2,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa2,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa2,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa2,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa2,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa2,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_eq_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa2,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_eq_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa2,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa2,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa2,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_eq_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa2,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa2,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa2,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_eq_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa2,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb2,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb2,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb2,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb2,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb2,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb2,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb2,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb2,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb2,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb2,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb2,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb2,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb2,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb2,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb2,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc2,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc2,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc2,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc2,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc2,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc2,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc2,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc2,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc2,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc2,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc2,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc2,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc2,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc2,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc2,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc2,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc2,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc2,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc2,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd2,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd2,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd2,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd2,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd2,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd2,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd2,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd2,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd2,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd2,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_eq_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd2,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd2,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd2,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_eq_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd2,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xba,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xba,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xba,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xba,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xba,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xba,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xba,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xba,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xba,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xba,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xba,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xba,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xba,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xba,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xba,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xba,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xba,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xca,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xca,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xca,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xca,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xca,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xca,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xca,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xca,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xca,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xca,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xca,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xca,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xca,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xca,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xca,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xca,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xca,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xca,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_eq_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xca,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xca,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xca,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xca,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_eq_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xca,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_eq_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xda,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xda,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xda,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xda,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_eq_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xda,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xda,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_eq_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xda,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xda,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xda,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xda,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xda,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_eq_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xda,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xda,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_eq_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xda,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xda,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_eq_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xda,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xda,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_eq_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xda,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x86,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x86,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_ge_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x86,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x86,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x86,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_ge_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x86,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x86,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x86,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x86,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x86,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x86,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x86,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x86,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x86,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x86,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x86,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x86,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x86,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x86,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x86,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x86,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x86,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x86,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x86,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x86,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x96,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x96,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x96,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_ge_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x96,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x96,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x96,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x96,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_ge_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x96,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x96,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x96,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x96,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x96,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x96,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x96,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x96,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x96,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x96,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x96,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x96,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x96,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x96,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x96,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x96,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x96,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x96,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x96,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x96,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x96,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x96,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x96,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa6,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa6,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa6,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_ge_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa6,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa6,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_ge_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa6,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa6,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa6,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa6,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa6,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa6,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa6,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ge_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa6,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ge_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa6,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa6,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa6,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ge_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa6,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa6,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa6,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ge_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa6,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb6,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb6,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb6,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb6,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb6,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb6,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb6,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb6,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb6,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb6,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb6,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb6,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb6,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb6,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb6,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc6,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc6,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc6,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc6,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc6,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc6,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc6,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc6,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc6,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc6,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc6,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc6,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc6,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc6,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc6,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc6,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc6,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc6,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc6,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd6,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd6,0xd4,0xf0,0xf8,0x00,0x00]
+
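+# Note (editor comment, describing the checks below): for 64-bit integer
+# operands, a 32-bit literal in the input bytes (src encoding 0xff) is
+# printed as lit64() and shown re-encoded in the 64-bit literal form
+# (src 0xfe plus an 8-byte literal), so those check-line encodings
+# intentionally differ from the raw input bytes above them. The f64
+# compares keep the plain 32-bit literal form.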
+0x7e,0x00,0xd6,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd6,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd6,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd6,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd6,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd6,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd6,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd6,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd6,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ge_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd6,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd6,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd6,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ge_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd6,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbe,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbe,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbe,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xbe,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbe,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbe,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbe,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbe,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbe,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbe,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xbe,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbe,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbe,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbe,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbe,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xce,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xce,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xce,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xce,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xce,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xce,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xce,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xce,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xce,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xce,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xce,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xce,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xce,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xce,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xce,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xce,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xce,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xce,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ge_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xce,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xce,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xce,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xce,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ge_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xce,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ge_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xde,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xde,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xde,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xde,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ge_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xde,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xde,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ge_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xde,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xde,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xde,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xde,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xde,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ge_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xde,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xde,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ge_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xde,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xde,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ge_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xde,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xde,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ge_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xde,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x84,0xd4,0xc1,0xfe,0x00,0x00]
+
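+# Note (editor comment, inferred from the encodings below): the abs (|x|)
+# source modifiers appear in the low bits of byte 1 with clamp in its top
+# bit (e.g. 0x83 = clamp plus abs on src0 and src1), while the neg (-x)
+# modifiers occupy the top bits of byte 7 (0x20 = neg src0, 0x60 = neg
+# src0 and src1).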
+0x7e,0x02,0x84,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_gt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x84,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x84,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x84,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_gt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x84,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x84,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x84,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x84,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x84,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x84,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x84,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x84,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x84,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x84,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x84,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x84,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x84,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x84,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x84,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x84,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x84,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x84,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x84,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x84,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x94,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x94,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x94,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_gt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x94,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x94,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x94,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x94,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_gt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x94,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x94,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x94,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x94,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x94,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x94,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x94,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x94,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x94,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x94,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x94,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x94,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x94,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x94,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x94,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x94,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x94,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x94,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x94,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x94,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x94,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x94,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x94,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa4,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa4,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa4,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_gt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa4,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa4,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_gt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa4,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa4,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa4,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa4,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa4,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa4,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa4,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_gt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa4,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_gt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa4,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa4,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa4,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_gt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa4,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa4,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa4,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_gt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa4,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb4,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb4,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb4,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb4,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb4,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb4,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb4,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb4,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb4,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb4,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb4,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb4,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb4,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb4,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb4,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc4,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc4,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc4,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc4,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc4,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc4,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc4,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc4,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc4,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc4,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc4,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc4,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc4,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc4,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc4,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc4,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc4,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc4,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc4,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd4,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd4,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd4,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd4,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd4,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd4,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd4,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd4,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd4,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd4,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_gt_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd4,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd4,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd4,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_gt_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd4,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbc,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbc,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbc,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xbc,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbc,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbc,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbc,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbc,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbc,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbc,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xbc,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbc,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbc,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbc,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbc,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcc,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcc,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcc,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcc,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcc,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcc,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcc,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcc,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcc,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcc,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcc,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcc,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcc,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xcc,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_gt_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcc,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xcc,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcc,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcc,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_gt_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcc,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_gt_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdc,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdc,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdc,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_gt_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdc,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_gt_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdc,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdc,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdc,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdc,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_gt_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdc,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdc,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_gt_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdc,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_gt_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdc,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xdc,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_gt_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdc,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x83,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x83,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_le_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x83,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x83,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x83,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_le_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x83,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x83,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x83,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x83,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x83,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x83,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x83,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x83,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x83,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x83,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x83,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x83,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x83,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x83,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x83,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x83,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x83,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x83,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x83,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x83,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x93,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x93,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x93,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_le_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x93,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x93,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x93,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x93,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_le_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x93,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x93,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x93,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x93,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x93,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x93,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x93,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x93,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x93,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x93,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x93,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x93,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x93,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x93,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x93,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x93,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x93,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x93,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x93,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x93,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x93,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x93,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x93,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa3,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa3,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa3,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_le_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa3,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa3,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_le_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa3,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa3,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa3,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa3,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa3,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa3,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa3,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_le_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa3,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_le_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa3,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa3,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa3,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_le_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa3,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa3,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa3,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_le_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa3,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb3,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb3,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb3,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb3,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb3,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb3,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb3,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb3,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb3,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb3,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb3,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb3,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb3,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb3,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb3,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc3,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc3,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc3,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc3,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc3,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc3,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc3,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc3,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc3,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc3,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc3,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc3,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc3,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc3,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc3,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc3,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc3,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc3,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc3,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd3,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd3,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd3,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd3,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd3,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd3,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd3,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd3,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd3,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd3,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_le_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd3,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd3,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd3,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_le_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd3,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbb,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbb,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbb,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xbb,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbb,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbb,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbb,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbb,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbb,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbb,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xbb,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbb,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbb,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbb,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbb,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcb,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcb,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcb,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcb,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcb,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcb,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcb,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcb,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcb,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcb,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcb,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcb,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcb,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xcb,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_le_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcb,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xcb,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcb,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcb,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_le_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcb,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_le_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdb,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdb,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdb,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_le_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdb,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_le_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdb,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdb,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdb,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdb,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_le_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdb,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdb,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_le_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdb,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_le_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdb,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xdb,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_le_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdb,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x85,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x85,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_lg_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x85,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x85,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x85,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_lg_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x85,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x85,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lg_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x85,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x85,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lg_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x85,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x85,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x85,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x85,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x85,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lg_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x85,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x85,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lg_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x85,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x85,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lg_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x85,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x85,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x85,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x85,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x85,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x85,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lg_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x85,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x95,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x95,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x95,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_lg_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x95,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x95,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x95,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x95,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_lg_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x95,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x95,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lg_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x95,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x95,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lg_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x95,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x95,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x95,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x95,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x95,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x95,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x95,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x95,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lg_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x95,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x95,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lg_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x95,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x95,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lg_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x95,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x95,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lg_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x95,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x95,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x95,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x95,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lg_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x95,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa5,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_lg_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa5,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa5,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_lg_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa5,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa5,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_lg_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa5,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa5,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lg_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa5,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa5,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lg_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa5,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa5,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lg_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa5,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_lg_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa5,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_lg_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa5,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lg_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa5,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa5,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_lg_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa5,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_lg_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa5,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa5,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_lg_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa5,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x81,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x81,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_lt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x81,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x81,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x81,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_lt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x81,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x81,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x81,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x81,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x81,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x81,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x81,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x81,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x81,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x81,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x81,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x81,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x81,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x81,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x81,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x81,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x81,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x81,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x81,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x81,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x91,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x91,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x91,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_lt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x91,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x91,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x91,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x91,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_lt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x91,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x91,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x91,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x91,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x91,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x91,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x91,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x91,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x91,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x91,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x91,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x91,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x91,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x91,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x91,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x91,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x91,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x91,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x91,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x91,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x91,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x91,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x91,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa1,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa1,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa1,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_lt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa1,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa1,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_lt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa1,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa1,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa1,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa1,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa1,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa1,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa1,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_lt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa1,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_lt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa1,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa1,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa1,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_lt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa1,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa1,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa1,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_lt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa1,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb1,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb1,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb1,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb1,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb1,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb1,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb1,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb1,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb1,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb1,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb1,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb1,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb1,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb1,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb1,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc1,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc1,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc1,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc1,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc1,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc1,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc1,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc1,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc1,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc1,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc1,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc1,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc1,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc1,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc1,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc1,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc1,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc1,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc1,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd1,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd1,0xd4,0xf0,0xf8,0x00,0x00]
+
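+# Note: in the 64-bit compare cases below, the input bytes use the 32-bit
+# literal src encoding (0xff), but the check lines show the disassembler
+# printing the operand in lit64(...) form, with the lit64 src encoding (0xfe)
+# and a zero-extended 64-bit literal in the re-encoded bytes. The input/
+# encoding byte mismatch in these pairs is therefore expected, not an error.
+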
+0x7e,0x00,0xd1,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd1,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd1,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd1,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd1,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd1,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd1,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd1,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd1,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_lt_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd1,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd1,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd1,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_lt_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd1,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb9,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb9,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb9,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb9,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb9,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb9,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb9,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb9,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb9,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb9,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb9,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb9,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb9,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb9,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb9,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc9,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc9,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc9,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc9,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc9,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc9,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc9,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc9,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc9,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc9,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc9,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc9,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc9,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc9,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_lt_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc9,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc9,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc9,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc9,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_lt_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc9,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_lt_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd9,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xd9,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd9,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_lt_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd9,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_lt_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd9,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd9,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd9,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd9,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_lt_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd9,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd9,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_lt_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd9,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_lt_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd9,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd9,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_lt_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd9,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xb5,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xb5,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_i16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xb5,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xb5,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xb5,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 s1, s2 ; encoding: [0x7e,0x00,0xb5,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 s105, s105 ; encoding: [0x7e,0x00,0xb5,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xb5,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_i16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xb5,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_i16_e64 v1, v2 ; encoding: [0x7e,0x00,0xb5,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xb5,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ne_i16_e64 v255, v255 ; encoding: [0x7e,0x00,0xb5,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xb5,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xb5,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ne_i16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xb5,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xc5,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xc5,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_i32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xc5,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc5,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xc5,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_i32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xc5,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ne_i32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xc5,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xc5,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 s1, s2 ; encoding: [0x7e,0x00,0xc5,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 s105, s105 ; encoding: [0x7e,0x00,0xc5,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xc5,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_i32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xc5,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_i32_e64 v1, v2 ; encoding: [0x7e,0x00,0xc5,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xc5,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ne_i32_e64 v255, v255 ; encoding: [0x7e,0x00,0xc5,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xc5,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_i32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xc5,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xc5,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ne_i32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xc5,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_i64_e64 -1, -1 ; encoding: [0x7e,0x00,0xd5,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 0.5, null ; encoding: [0x7e,0x00,0xd5,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_i64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xd5,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_i64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xd5,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ne_i64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xd5,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xd5,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xd5,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xd5,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_i64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xd5,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xd5,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ne_i64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xd5,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_i64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xd5,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xd5,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ne_i64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xd5,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xbd,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 0x3800, m0 ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xfa,0x00,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 0xfe0b, vcc_hi ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 exec_hi, null ; encoding: [0x7e,0x00,0xbd,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_u16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xbd,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 m0, 0x3800 ; encoding: [0x7e,0x00,0xbd,0xd4,0x7d,0xfe,0x01,0x00,0x00,0x38,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 null, exec_lo ; encoding: [0x7e,0x00,0xbd,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 s1, s2 ; encoding: [0x7e,0x00,0xbd,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 s105, s105 ; encoding: [0x7e,0x00,0xbd,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xbd,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_u16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xbd,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_u16_e64 v1, v2 ; encoding: [0x7e,0x00,0xbd,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xbd,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ne_u16_e64 v255, v255 ; encoding: [0x7e,0x00,0xbd,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0xbd,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0xbd,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ne_u16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xbd,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0xcd,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xf0,0xfa,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 0.5, m0 ; encoding: [0x7e,0x00,0xcd,0xd4,0xf0,0xfa,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_u32_e64 0xaf123456, vcc_hi ; encoding: [0x7e,0x00,0xcd,0xd4,0xff,0xd6,0x00,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcd,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 exec_hi, null ; encoding: [0x7e,0x00,0xcd,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_u32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0xcd,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ne_u32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0xcd,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 null, exec_lo ; encoding: [0x7e,0x00,0xcd,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 s1, s2 ; encoding: [0x7e,0x00,0xcd,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 s105, s105 ; encoding: [0x7e,0x00,0xcd,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xfd,0xd4,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 src_scc, vcc_lo ; encoding: [0x7e,0x00,0xcd,0xd4,0xfd,0xd4,0x00,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_u32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0xcd,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_u32_e64 v1, v2 ; encoding: [0x7e,0x00,0xcd,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0xcd,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ne_u32_e64 v255, v255 ; encoding: [0x7e,0x00,0xcd,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0xcd,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_u32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0xcd,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xcd,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ne_u32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0xcd,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ne_u64_e64 -1, -1 ; encoding: [0x7e,0x00,0xdd,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 0.5, null ; encoding: [0x7e,0x00,0xdd,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xff,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_u64_e64 lit64(0xaf123456), vcc ; encoding: [0x7e,0x00,0xdd,0xd4,0xfe,0xd4,0x00,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x7e,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ne_u64_e64 exec, src_scc ; encoding: [0x7e,0x00,0xdd,0xd4,0x7e,0xfa,0x01,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ne_u64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xdd,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xdd,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xdd,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xfd,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 src_scc, exec ; encoding: [0x7e,0x00,0xdd,0xd4,0xfd,0xfc,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ne_u64_e64 ttmp[14:15], lit64(0xaf123456) ; encoding: [0x7e,0x00,0xdd,0xd4,0x7a,0xfc,0x01,0x00,0x56,0x34,0x12,0xaf,0x00,0x00,0x00,0x00]
+
+0x7e,0x00,0xdd,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ne_u64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xdd,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ne_u64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xdd,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xdd,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ne_u64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xdd,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8d,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8d,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_neq_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8d,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8d,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8d,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_neq_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8d,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8d,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_neq_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8d,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_neq_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8d,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8d,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8d,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8d,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_neq_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8d,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_neq_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8d,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8d,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_neq_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8d,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8d,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8d,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8d,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8d,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_neq_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8d,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9d,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9d,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9d,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_neq_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9d,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9d,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_neq_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9d,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9d,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_neq_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9d,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9d,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_neq_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9d,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_neq_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9d,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9d,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9d,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9d,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_neq_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9d,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_neq_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9d,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9d,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_neq_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9d,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9d,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_neq_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9d,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9d,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9d,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9d,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_neq_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9d,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xad,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_neq_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xad,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xad,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_neq_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xad,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xad,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_neq_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xad,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xad,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_neq_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xad,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xad,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_neq_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xad,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xad,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_neq_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xad,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xad,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_neq_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xad,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xad,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_neq_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xad,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xad,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_neq_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xad,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xad,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_neq_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xad,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xad,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_neq_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xad,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xad,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_neq_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xad,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x89,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x89,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nge_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x89,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x89,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x89,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nge_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x89,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x89,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nge_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x89,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x89,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nge_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x89,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x89,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x89,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x89,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x89,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nge_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x89,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x89,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nge_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x89,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x89,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nge_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x89,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x89,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x89,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x89,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x89,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x89,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nge_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x89,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x99,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x99,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x99,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nge_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x99,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x99,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nge_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x99,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x99,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nge_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x99,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x99,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nge_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x99,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x99,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nge_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x99,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x99,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x99,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x99,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x99,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x99,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x99,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x99,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nge_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x99,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x99,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nge_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x99,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x99,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nge_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x99,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x99,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nge_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x99,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x99,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x99,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x99,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nge_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x99,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa9,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_nge_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa9,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa9,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_nge_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa9,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa9,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_nge_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa9,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa9,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nge_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa9,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa9,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nge_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa9,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa9,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nge_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa9,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_nge_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa9,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_nge_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa9,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nge_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa9,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa9,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_nge_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa9,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_nge_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa9,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa9,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_nge_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa9,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8b,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8b,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_ngt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8b,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8b,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8b,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_ngt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8b,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8b,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8b,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8b,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8b,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8b,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8b,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8b,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8b,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8b,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8b,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8b,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8b,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8b,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8b,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ngt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8b,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9b,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9b,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9b,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_ngt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9b,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9b,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ngt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9b,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9b,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_ngt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9b,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9b,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9b,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9b,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9b,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9b,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9b,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9b,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9b,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9b,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9b,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9b,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ngt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9b,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9b,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9b,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9b,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ngt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9b,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xab,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xab,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xab,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_ngt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xab,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xab,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_ngt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xab,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xab,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xab,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xab,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ngt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xab,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xab,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xab,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xab,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xab,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xab,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xab,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xab,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_ngt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xab,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xab,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xab,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xab,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xab,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xab,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_ngt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xab,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8c,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8c,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nle_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8c,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8c,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8c,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nle_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8c,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8c,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nle_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8c,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nle_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8c,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8c,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8c,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8c,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nle_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8c,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nle_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8c,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8c,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nle_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8c,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8c,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8c,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8c,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8c,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nle_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8c,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9c,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9c,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9c,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nle_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9c,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9c,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nle_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9c,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9c,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nle_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9c,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9c,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nle_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9c,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nle_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9c,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9c,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9c,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9c,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nle_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9c,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nle_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9c,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9c,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nle_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9c,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9c,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nle_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9c,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9c,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9c,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9c,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nle_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9c,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xac,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_nle_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xac,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xac,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_nle_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xac,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xac,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_nle_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xac,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xac,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nle_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xac,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xac,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nle_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xac,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xac,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nle_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xac,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xac,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_nle_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xac,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xac,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_nle_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xac,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xac,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nle_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xac,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xac,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_nle_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xac,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xac,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_nle_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xac,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xac,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_nle_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xac,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8a,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8a,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nlg_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8a,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8a,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8a,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nlg_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8a,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8a,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8a,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8a,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8a,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8a,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8a,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8a,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8a,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8a,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8a,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8a,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8a,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8a,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8a,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlg_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8a,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9a,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9a,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9a,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nlg_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9a,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9a,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlg_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9a,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9a,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nlg_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9a,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9a,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9a,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9a,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9a,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9a,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9a,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9a,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9a,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9a,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9a,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9a,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlg_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9a,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9a,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9a,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9a,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlg_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9a,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xaa,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xaa,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xaa,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_nlg_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xaa,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xaa,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_nlg_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xaa,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xaa,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xaa,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xaa,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlg_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xaa,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xaa,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xaa,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xaa,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xaa,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlg_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xaa,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xaa,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xaa,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xaa,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xaa,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_nlg_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xaa,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x8e,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x8e,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nlt_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x8e,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x8e,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x8e,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nlt_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x8e,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x8e,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x8e,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x8e,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x8e,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x8e,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x8e,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x8e,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x8e,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x8e,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x8e,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x8e,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x8e,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x8e,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x8e,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlt_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x8e,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x9e,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x9e,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x9e,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_nlt_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x9e,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x9e,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlt_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x9e,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9e,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_nlt_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x9e,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x9e,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x9e,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x9e,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x9e,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x9e,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x9e,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x9e,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x9e,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x9e,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x9e,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x9e,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlt_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x9e,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x9e,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x9e,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x9e,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlt_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x9e,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xae,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xae,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xae,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_nlt_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xae,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xae,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_nlt_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xae,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xae,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xae,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xae,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlt_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xae,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xae,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xae,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xae,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xae,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xae,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xae,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xae,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_nlt_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xae,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xae,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xae,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xae,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xae,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xae,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_nlt_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xae,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x87,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x87,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_o_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x87,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x87,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x87,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_o_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x87,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x87,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_o_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x87,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x87,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_o_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x87,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x87,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x87,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x87,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x87,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_o_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x87,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x87,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_o_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x87,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x87,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_o_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x87,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x87,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x87,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x87,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x87,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x87,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_o_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x87,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x97,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x97,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x97,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_o_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x97,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x97,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_o_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x97,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x97,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_o_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x97,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x97,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_o_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x97,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x97,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_o_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x97,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x97,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x97,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x97,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x97,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x97,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x97,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x97,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_o_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x97,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x97,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_o_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x97,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x97,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_o_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x97,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x97,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_o_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x97,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x97,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x97,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x97,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_o_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x97,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa7,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_o_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa7,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa7,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_o_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa7,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa7,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_o_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa7,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa7,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_o_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa7,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa7,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_o_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa7,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa7,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_o_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa7,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_o_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa7,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_o_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa7,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_o_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa7,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa7,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_o_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa7,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_o_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa7,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa7,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_o_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa7,0xd4,0x6a,0xf4,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x88,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x88,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_u_f16_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x88,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x88,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 -|0xfe0b|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x88,0xd4,0xff,0xd6,0x00,0x60,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_u_f16_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x88,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x88,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_u_f16_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x88,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x88,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_u_f16_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x88,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x88,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 null, exec_lo ; encoding: [0x7e,0x00,0x88,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 s1, s2 ; encoding: [0x7e,0x00,0x88,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 s105, s105 ; encoding: [0x7e,0x00,0x88,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_u_f16_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x88,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x88,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_u_f16_e64 v1, v2 ; encoding: [0x7e,0x00,0x88,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x88,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_u_f16_e64 v255, v255 ; encoding: [0x7e,0x00,0x88,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x88,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 vcc_hi, 0xfe0b ; encoding: [0x7e,0x00,0x88,0xd4,0x6b,0xfe,0x01,0x00,0x0b,0xfe,0x00,0x00]
+
+0x7e,0x00,0x88,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x88,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x88,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_u_f16_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x88,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0x98,0xd4,0xc1,0xfe,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 -1, exec_hi ; encoding: [0x7e,0x00,0x98,0xd4,0xc1,0xfe,0x00,0x00]
+
+0x7e,0x02,0x98,0xd4,0xfd,0xd4,0x00,0x20
+# GFX1250: v_cmpx_u_f32_e64 -src_scc, |vcc_lo| ; encoding: [0x7e,0x02,0x98,0xd4,0xfd,0xd4,0x00,0x20]
+
+0x7e,0x83,0x98,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_u_f32_e64 -|0xaf123456|, -|vcc_hi| clamp ; encoding: [0x7e,0x83,0x98,0xd4,0xff,0xd6,0x00,0x60,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x98,0xd4,0xf0,0xfa,0x00,0x40
+# GFX1250: v_cmpx_u_f32_e64 0.5, -m0 ; encoding: [0x7e,0x00,0x98,0xd4,0xf0,0xfa,0x00,0x40]
+
+0x7e,0x00,0x98,0xd4,0x7e,0x82,0x01,0x00
+# GFX1250: v_cmpx_u_f32_e64 exec_lo, -1 ; encoding: [0x7e,0x00,0x98,0xd4,0x7e,0x82,0x01,0x00]
+
+0x7e,0x00,0x98,0xd4,0x7d,0xe0,0x01,0x00
+# GFX1250: v_cmpx_u_f32_e64 m0, 0.5 ; encoding: [0x7e,0x00,0x98,0xd4,0x7d,0xe0,0x01,0x00]
+
+0x7e,0x00,0x98,0xd4,0x7c,0xfc,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 null, exec_lo ; encoding: [0x7e,0x00,0x98,0xd4,0x7c,0xfc,0x00,0x00]
+
+0x7e,0x00,0x98,0xd4,0x01,0x04,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 s1, s2 ; encoding: [0x7e,0x00,0x98,0xd4,0x01,0x04,0x00,0x00]
+
+0x7e,0x00,0x98,0xd4,0x69,0xd2,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 s105, s105 ; encoding: [0x7e,0x00,0x98,0xd4,0x69,0xd2,0x00,0x00]
+
+0x7e,0x00,0x98,0xd4,0x7b,0xfa,0x01,0x00
+# GFX1250: v_cmpx_u_f32_e64 ttmp15, src_scc ; encoding: [0x7e,0x00,0x98,0xd4,0x7b,0xfa,0x01,0x00]
+
+0x7e,0x00,0x98,0xd4,0x01,0x05,0x02,0x00
+# GFX1250: v_cmpx_u_f32_e64 v1, v2 ; encoding: [0x7e,0x00,0x98,0xd4,0x01,0x05,0x02,0x00]
+
+0x7e,0x00,0x98,0xd4,0xff,0xff,0x03,0x00
+# GFX1250: v_cmpx_u_f32_e64 v255, v255 ; encoding: [0x7e,0x00,0x98,0xd4,0xff,0xff,0x03,0x00]
+
+0x7e,0x00,0x98,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_u_f32_e64 vcc_hi, 0xaf123456 ; encoding: [0x7e,0x00,0x98,0xd4,0x6b,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0x98,0xd4,0x6a,0xf6,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 vcc_lo, ttmp15 ; encoding: [0x7e,0x00,0x98,0xd4,0x6a,0xf6,0x00,0x00]
+
+0x7e,0x01,0x98,0xd4,0x7f,0xf8,0x00,0x00
+# GFX1250: v_cmpx_u_f32_e64 |exec_hi|, null ; encoding: [0x7e,0x01,0x98,0xd4,0x7f,0xf8,0x00,0x00]
+
+0x7e,0x00,0xa8,0xd4,0xc1,0x82,0x01,0x00
+# GFX1250: v_cmpx_u_f64_e64 -1, -1 ; encoding: [0x7e,0x00,0xa8,0xd4,0xc1,0x82,0x01,0x00]
+
+0x7e,0x01,0xa8,0xd4,0x7e,0xfa,0x01,0x20
+# GFX1250: v_cmpx_u_f64_e64 -|exec|, src_scc ; encoding: [0x7e,0x01,0xa8,0xd4,0x7e,0xfa,0x01,0x20]
+
+0x7e,0x03,0xa8,0xd4,0xfd,0xfc,0x00,0x60
+# GFX1250: v_cmpx_u_f64_e64 -|src_scc|, -|exec| ; encoding: [0x7e,0x03,0xa8,0xd4,0xfd,0xfc,0x00,0x60]
+
+0x7e,0x00,0xa8,0xd4,0xf0,0xf8,0x00,0x00
+# GFX1250: v_cmpx_u_f64_e64 0.5, null ; encoding: [0x7e,0x00,0xa8,0xd4,0xf0,0xf8,0x00,0x00]
+
+0x7e,0x82,0xa8,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_u_f64_e64 0xaf123456, -|vcc| clamp ; encoding: [0x7e,0x82,0xa8,0xd4,0xff,0xd4,0x00,0x40,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa8,0xd4,0x7c,0xe0,0x01,0x00
+# GFX1250: v_cmpx_u_f64_e64 null, 0.5 ; encoding: [0x7e,0x00,0xa8,0xd4,0x7c,0xe0,0x01,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x68,0xd0,0x00,0x00
+# GFX1250: v_cmpx_u_f64_e64 s[104:105], s[104:105] ; encoding: [0x7e,0x00,0xa8,0xd4,0x68,0xd0,0x00,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x02,0x08,0x00,0x00
+# GFX1250: v_cmpx_u_f64_e64 s[2:3], s[4:5] ; encoding: [0x7e,0x00,0xa8,0xd4,0x02,0x08,0x00,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf
+# GFX1250: v_cmpx_u_f64_e64 ttmp[14:15], 0xaf123456 ; encoding: [0x7e,0x00,0xa8,0xd4,0x7a,0xfe,0x01,0x00,0x56,0x34,0x12,0xaf]
+
+0x7e,0x00,0xa8,0xd4,0xfe,0xfd,0x03,0x00
+# GFX1250: v_cmpx_u_f64_e64 v[254:255], v[254:255] ; encoding: [0x7e,0x00,0xa8,0xd4,0xfe,0xfd,0x03,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x02,0x05,0x02,0x00
+# GFX1250: v_cmpx_u_f64_e64 v[2:3], v[2:3] ; encoding: [0x7e,0x00,0xa8,0xd4,0x02,0x05,0x02,0x00]
+
+0x7e,0x00,0xa8,0xd4,0x6a,0xf4,0x00,0x00
+# GFX1250: v_cmpx_u_f64_e64 vcc, ttmp[14:15] ; encoding: [0x7e,0x00,0xa8,0xd4,0x6a,0xf4,0x00,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp16.txt
new file mode 100644
index 0000000..73e9d73
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp16.txt
@@ -0,0 +1,10 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+# GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 row_ror:7 row_mask:0xf bank_mask:0x1 ; encoding: [0x00,0x00,0x3d,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x01,0xf1]
+0x00,0x00,0x3d,0xcc,0xfa,0x04,0x0e,0x04,0x01,0x27,0x01,0xf1
+
+# GFX1250: v_fma_mixlo_bf16_e64_dpp v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0 bank_mask:0xf ; encoding: [0x00,0xc0,0x3e,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f]
+0x00,0xc0,0x3e,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f
+
+# GFX1250: v_fma_mixhi_bf16_e64_dpp v0, v1, v2, v3 op_sel_hi:[1,1,1] clamp quad_perm:[0,2,3,1] row_mask:0x0 bank_mask:0xf ; encoding: [0x00,0xc0,0x3f,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f]
+0x00,0xc0,0x3f,0xcc,0xfa,0x04,0x0e,0x1c,0x01,0x78,0x00,0x0f
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp8.txt
new file mode 100644
index 0000000..27e7025
--- /dev/null
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3p_dpp8.txt
@@ -0,0 +1,19 @@
+# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250 %s
+
+# GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 clamp dpp8:[2,2,2,2,4,4,4,4] fi:1 ; encoding: [0x00,0x80,0x3d,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
+0x00,0x80,0x3d,0xcc,0xea,0x04,0x0e,0x04,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mix_f32_bf16_e64_dpp v0, v1, v2, v3 dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x00,0x3d,0xcc,0xe9,0x04,0x0e,0x04,0x01,0x92,0x44,0x92]
+0x00,0x00,0x3d,0xcc,0xe9,0x04,0x0e,0x04,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mixlo_bf16_e64_dpp v0, |v1|, -v2, |v3| dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x05,0x3e,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92]
+0x00,0x05,0x3e,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mixlo_bf16_e64_dpp v0, |v1|, -v2, |v3| op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x0d,0x3e,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
+0x00,0x0d,0x3e,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mixhi_bf16_e64_dpp v0, |v1|, -v2, |v3| dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x05,0x3f,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92]
+0x00,0x05,0x3f,0xcc,0xe9,0x04,0x0e,0x44,0x01,0x92,0x44,0x92
+
+# GFX1250: v_fma_mixhi_bf16_e64_dpp v0, |v1|, -v2, |v3| op_sel:[1,0,0] op_sel_hi:[1,0,0] dpp8:[2,2,2,2,4,4,4,4] ; encoding: [0x00,0x0d,0x3f,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92]
+0x00,0x0d,0x3f,0xcc,0xe9,0x04,0x0e,0x4c,0x01,0x92,0x44,0x92
diff --git a/llvm/test/MC/Disassembler/RISCV/riscv-mapping-symbols.s b/llvm/test/MC/Disassembler/RISCV/riscv-mapping-symbols.s
new file mode 100644
index 0000000..ff15008
--- /dev/null
+++ b/llvm/test/MC/Disassembler/RISCV/riscv-mapping-symbols.s
@@ -0,0 +1,20 @@
+# RUN: llvm-mc --triple=riscv32-unknown-none-elf %s -filetype=obj -o - \
+# RUN: | llvm-objdump -dr - \
+# RUN: | FileCheck %s
+# RUN: llvm-mc --triple=riscv64-unknown-none-elf %s -filetype=obj -o - \
+# RUN: | llvm-objdump -dr - \
+# RUN: | FileCheck %s
+
+
+ # CHECK: 00000013 nop
+ nop
+
+ # CHECK-NEXT: 55 55 55 55 .word 0x55555555
+ .word 0x55555555
+
+ # CHECK-NEXT: 00 00 00 00 .word 0x00000000
+ # CHECK-NEXT: R_RISCV_32 foo
+ .word foo
+
+ # CHECK-NEXT: 00000013 nop
+ nop
diff --git a/llvm/test/MC/ELF/many-instructions.s b/llvm/test/MC/ELF/many-instructions.s
index 843d35f..7c13c0d 100644
--- a/llvm/test/MC/ELF/many-instructions.s
+++ b/llvm/test/MC/ELF/many-instructions.s
@@ -1,4 +1,5 @@
-# REQUIRES: asserts
+## Checks the size of an internal MC structure, which differs on 32-bit hosts.
+# REQUIRES: asserts, llvm-64-bits
# RUN: llvm-mc -filetype=obj -triple=x86_64 %s -o /dev/null -debug-only=mc-dump 2>&1 | grep -E -o '[0-9]+ Data Size:[0-9]+' | FileCheck %s
## Test that encodeInstruction may cause a new fragment to be created.
diff --git a/llvm/test/MC/RISCV/large-instructions.s b/llvm/test/MC/RISCV/large-instructions.s
deleted file mode 100644
index b50dbde..0000000
--- a/llvm/test/MC/RISCV/large-instructions.s
+++ /dev/null
@@ -1,29 +0,0 @@
-# RUN: llvm-mc -filetype=obj -triple riscv32 < %s \
-# RUN: | llvm-objdump -d - | FileCheck %s
-
-# CHECK: 011f 4523 8967 <unknown>
-.byte 0x1f, 0x01, 0x23, 0x45, 0x67, 0x89
-
-# CHECK: 4523013f cdab8967 <unknown>
-.byte 0x3f, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd
-
-# CHECK: 007f 4523 8967 cdab feef <unknown>
-.byte 0x7f, 0x00, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe
-
-# CHECK: 4523107f cdab8967 badcfeef <unknown>
-.byte 0x7f, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba
-
-# CHECK: 207f 4523 8967 cdab feef badc 7698 <unknown>
-.byte 0x7f, 0x20, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76
-
-# CHECK: 4523307f cdab8967 badcfeef 32547698 <unknown>
-.byte 0x7f, 0x30, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32
-
-# CHECK: 407f 4523 8967 cdab feef badc 7698 3254 1210 <unknown>
-.byte 0x7f, 0x40, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12
-
-# CHECK: 4523507f cdab8967 badcfeef 32547698 56341210 <unknown>
-.byte 0x7f, 0x50, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56
-
-# CHECK: 607f 4523 8967 cdab feef badc 7698 3254 1210 5634 9a78 <unknown>
-.byte 0x7f, 0x60, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56, 0x78, 0x9a
diff --git a/llvm/test/MC/RISCV/large-instructions.test b/llvm/test/MC/RISCV/large-instructions.test
new file mode 100644
index 0000000..b8396a9
--- /dev/null
+++ b/llvm/test/MC/RISCV/large-instructions.test
@@ -0,0 +1,60 @@
+# RUN: yaml2obj %s -o %t
+# RUN: llvm-objdump -d %t | FileCheck %s
+
+## This checks objdump's handling of wide instruction encodings, and how it
+## groups the instruction bytes when disassembling.
+##
+## This is written in YAML because using `.byte` emits the wrong mapping
+## symbols.
+
+--- !ELF
+FileHeader:
+ Class: ELFCLASS32
+ Data: ELFDATA2LSB
+ Type: ET_REL
+ Machine: EM_RISCV
+ SectionHeaderStringTable: .strtab
+Sections:
+ - Name: .text
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_EXECINSTR ]
+ AddressAlign: 0x1
+ ContentArray: [
+ # CHECK: 011f 4523 8967 <unknown>
+ 0x1f, 0x01, 0x23, 0x45, 0x67, 0x89,
+
+ # CHECK: 4523013f cdab8967 <unknown>
+ 0x3f, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd,
+
+ # CHECK: 007f 4523 8967 cdab feef <unknown>
+ 0x7f, 0x00, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe,
+
+ # CHECK: 4523107f cdab8967 badcfeef <unknown>
+ 0x7f, 0x10, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba,
+
+ # CHECK: 207f 4523 8967 cdab feef badc 7698 <unknown>
+ 0x7f, 0x20, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+
+ # CHECK: 4523307f cdab8967 badcfeef 32547698 <unknown>
+ 0x7f, 0x30, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32,
+
+ # CHECK: 407f 4523 8967 cdab feef badc 7698 3254 1210 <unknown>
+ 0x7f, 0x40, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12,
+
+ # CHECK: 4523507f cdab8967 badcfeef 32547698 56341210 <unknown>
+ 0x7f, 0x50, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56,
+
+ # CHECK: 607f 4523 8967 cdab feef badc 7698 3254 1210 5634 9a78 <unknown>
+ 0x7f, 0x60, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10, 0x12, 0x34, 0x56, 0x78, 0x9a,
+ ]
+
+ - Type: SectionHeaderTable
+ Sections:
+ - Name: .strtab
+ - Name: .symtab
+ - Name: .text
+Symbols:
+ - Name: "$x"
+ Section: .text
+ Value: 0x0
+...
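
A brief aside on the byte groupings asserted above: the lengths follow the classic
RISC-V variable-length encoding, in which the low bits of the first 16-bit parcel
declare the instruction's width, and (as the CHECK lines show) llvm-objdump prints
the bytes as 32-bit words when the length is a multiple of four bytes and as 16-bit
parcels otherwise. Below is a minimal Python sketch of the length rule; the helper
name is illustrative, not part of the patch.

    def riscv_insn_length(insn_bytes):
        """Instruction length in bytes, decoded from the low bits of the
        first 16-bit parcel (classic RISC-V length encoding)."""
        b0, b1 = insn_bytes[0], insn_bytes[1]
        if b0 & 0b11 != 0b11:
            return 2                      # compressed (low two bits != 11)
        if b0 & 0b11111 != 0b11111:
            return 4                      # standard 32-bit encoding
        if b0 & 0b111111 == 0b011111:
            return 6                      # 48-bit encoding
        if b0 & 0b1111111 == 0b0111111:
            return 8                      # 64-bit encoding
        # Here b0's low seven bits are necessarily 0b1111111: 80-bit and
        # wider, (80 + 16*nnn) bits with nnn in bits [14:12] of the parcel.
        nnn = (b1 >> 4) & 0b111
        if nnn != 0b111:
            return (80 + 16 * nnn) // 8
        raise ValueError("reserved length encoding")

    # First bytes of the encodings in this test: 0x1f -> 6 bytes,
    # 0x3f -> 8 bytes, 0x7f with nnn = 0..6 -> 10, 12, ..., 22 bytes.
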
diff --git a/llvm/test/MC/RISCV/nop-slide.s b/llvm/test/MC/RISCV/nop-slide.s
index 4dc888b..a49ffdc 100644
--- a/llvm/test/MC/RISCV/nop-slide.s
+++ b/llvm/test/MC/RISCV/nop-slide.s
@@ -10,18 +10,15 @@
auipc a0, 0
# CHECK-RVC-NORELAX: 0000000000000000 <.text>:
-# CHECK-RVC-NORELAX-NEXT: 0: 0000 unimp
-# CHECK-RVC-NORELAX-NEXT: 2: 0001 nop
+# CHECK-RVC-NORELAX-NEXT: 0: 00 00 01 00 .word 0x00010000
# CHECK-RVC-NORELAX-NEXT: 4: 00000517 auipc a0, 0x0
# CHECK-RVC-RELAX: 0000000000000000 <.text>:
# CHECK-RVC-RELAX-NEXT: 0: 0001 nop
-# CHECK-RVC-RELAX-NEXT: 2: 0100 addi s0, sp, 0x80
-# CHECK-RVC-RELAX-NEXT: 4: 1700 addi s0, sp, 0x3a0
-# CHECK-RVC-RELAX-NEXT: 6: 0005 c.nop 0x1
-# CHECK-RVC-RELAX-NEXT: 8: 00 <unknown>
+# CHECK-RVC-RELAX-NEXT: 2: 00 01 .short 0x0100
+# CHECK-RVC-RELAX-NEXT: 4: 00 .byte 0x00
+# CHECK-RVC-RELAX-NEXT: 5: 00000517 auipc a0, 0x0
# CHECK: 0000000000000000 <.text>:
-# CHECK-NEXT: 0: 0000 <unknown>
-# CHECK-NEXT: 2: 0000 <unknown>
+# CHECK-NEXT: 0: 00 00 00 00 .word 0x00000000
# CHECK-NEXT: 4: 00000517 auipc a0, 0x0
diff --git a/llvm/test/MC/RISCV/rvv/vsetvl-invalid.s b/llvm/test/MC/RISCV/rvv/vsetvl-invalid.s
index b45f3f2..d97b538 100644
--- a/llvm/test/MC/RISCV/rvv/vsetvl-invalid.s
+++ b/llvm/test/MC/RISCV/rvv/vsetvl-invalid.s
@@ -4,37 +4,37 @@
# RUN: | llvm-objdump -d --mattr=+v - | FileCheck %s
# CHECK: vsetvli a1, a0, e64, m1, tu, mu
-.word 0x018575d7
+.insn 4, 0x018575d7
# CHECK: vsetvli a1, a0, 0x1c
-.word 0x01c575d7
+.insn 4, 0x01c575d7
# CHECK: vsetvli a1, a0, 0x24
-.word 0x024575d7
+.insn 4, 0x024575d7
# CHECK: vsetvli a1, a0, 0x29
-.word 0x029575d7
+.insn 4, 0x029575d7
# CHECK: vsetvli a1, a0, 0x110
-.word 0x110575d7
+.insn 4, 0x110575d7
# CHECK: vsetvli a1, a0, e64, mf8, tu, mu
-.word 0x01d575d7
+.insn 4, 0x01d575d7
# CHECK: vsetivli a1, 0x10, e8, m4, tu, mu
-.word 0xc02875d7
+.insn 4, 0xc02875d7
# CHECK: vsetivli a1, 0x10, 0xc
-.word 0xc0c875d7
+.insn 4, 0xc0c875d7
# CHECK: vsetivli a1, 0x10, 0x14
-.word 0xc14875d7
+.insn 4, 0xc14875d7
# CHECK: vsetivli a1, 0x10, 0x38
-.word 0xc38875d7
+.insn 4, 0xc38875d7
# CHECK: vsetivli a1, 0x10, 0x103
-.word 0xd03875d7
+.insn 4, 0xd03875d7
# CHECK: vsetivli a1, 0x10, e8, mf4, tu, mu
-.word 0xc06875d7
+.insn 4, 0xc06875d7
diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll b/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
index a38d3e5..1e789b0 100644
--- a/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
+++ b/llvm/test/Transforms/GVN/PRE/phi-translate-2.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=gvn -S | FileCheck %s
+; RUN: opt < %s -passes=gvn -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -S | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@a = common global [100 x i64] zeroinitializer, align 16
@@ -50,32 +51,56 @@ if.end: ; preds = %if.then, %entry
}
define void @test2(i64 %i) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]]
-; CHECK-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]]
-; CHECK-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8
-; CHECK-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]]
-; CHECK-NEXT: store i64 [[MUL]], ptr @g1, align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3
-; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo()
-; CHECK-NEXT: store i64 [[CALL]], ptr @g2, align 8
-; CHECK-NEXT: [[T2_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @a, i64 24), align 8
-; CHECK-NEXT: [[T3_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @b, i64 24), align 8
-; CHECK-NEXT: [[DOTPRE:%.*]] = mul nsw i64 [[T3_PRE]], [[T2_PRE]]
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: [[MUL5_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], [[IF_THEN]] ], [ [[MUL]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[T3:%.*]] = phi i64 [ [[T3_PRE]], [[IF_THEN]] ], [ [[T1]], [[ENTRY]] ]
-; CHECK-NEXT: [[T2:%.*]] = phi i64 [ [[T2_PRE]], [[IF_THEN]] ], [ [[T0]], [[ENTRY]] ]
-; CHECK-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY]] ]
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]]
-; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]]
-; CHECK-NEXT: store i64 [[MUL5_PRE_PHI]], ptr @g3, align 8
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test2(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]]
+; MDEP-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
+; MDEP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]]
+; MDEP-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8
+; MDEP-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]]
+; MDEP-NEXT: store i64 [[MUL]], ptr @g1, align 8
+; MDEP-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3
+; MDEP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo()
+; MDEP-NEXT: store i64 [[CALL]], ptr @g2, align 8
+; MDEP-NEXT: [[T2_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @a, i64 24), align 8
+; MDEP-NEXT: [[T3_PRE:%.*]] = load i64, ptr getelementptr inbounds nuw (i8, ptr @b, i64 24), align 8
+; MDEP-NEXT: [[DOTPRE:%.*]] = mul nsw i64 [[T3_PRE]], [[T2_PRE]]
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: [[MUL5_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], [[IF_THEN]] ], [ [[MUL]], [[ENTRY:%.*]] ]
+; MDEP-NEXT: [[T3:%.*]] = phi i64 [ [[T3_PRE]], [[IF_THEN]] ], [ [[T1]], [[ENTRY]] ]
+; MDEP-NEXT: [[T2:%.*]] = phi i64 [ [[T2_PRE]], [[IF_THEN]] ], [ [[T0]], [[ENTRY]] ]
+; MDEP-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY]] ]
+; MDEP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]]
+; MDEP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]]
+; MDEP-NEXT: store i64 [[MUL5_PRE_PHI]], ptr @g3, align 8
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test2(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I:%.*]]
+; MSSA-NEXT: [[T0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
+; MSSA-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I]]
+; MSSA-NEXT: [[T1:%.*]] = load i64, ptr [[ARRAYIDX1]], align 8
+; MSSA-NEXT: [[MUL:%.*]] = mul nsw i64 [[T1]], [[T0]]
+; MSSA-NEXT: store i64 [[MUL]], ptr @g1, align 8
+; MSSA-NEXT: [[CMP:%.*]] = icmp sgt i64 [[MUL]], 3
+; MSSA-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[CALL:%.*]] = tail call i64 (...) @goo()
+; MSSA-NEXT: store i64 [[CALL]], ptr @g2, align 8
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: [[I_ADDR_0:%.*]] = phi i64 [ 3, [[IF_THEN]] ], [ [[I]], [[ENTRY:%.*]] ]
+; MSSA-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 [[I_ADDR_0]]
+; MSSA-NEXT: [[T2:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
+; MSSA-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [100 x i64], ptr @b, i64 0, i64 [[I_ADDR_0]]
+; MSSA-NEXT: [[T3:%.*]] = load i64, ptr [[ARRAYIDX4]], align 8
+; MSSA-NEXT: [[MUL5:%.*]] = mul nsw i64 [[T3]], [[T2]]
+; MSSA-NEXT: store i64 [[MUL5]], ptr @g3, align 8
+; MSSA-NEXT: ret void
;
entry:
%arrayidx = getelementptr inbounds [100 x i64], ptr @a, i64 0, i64 %i
@@ -252,29 +277,50 @@ if.end3: ; preds = %if.then2, %if.else,
; available in if.then. Check that we correctly phi-translate to the phi that
; the load has been replaced with.
define void @test6(ptr %ptr, i1 %arg) {
-; CHECK-LABEL: @test6(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX1_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX1_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[WHILE:%.*]]
-; CHECK: while:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ [[DOTPRE]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[IF_END:%.*]] ]
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[I_NEXT:%.*]], [[IF_END]] ]
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I]]
-; CHECK-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]]
-; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
-; CHECK: if.then:
-; CHECK-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4
-; CHECK-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: [[TMP2]] = phi i32 [ [[TMP0]], [[IF_THEN]] ], [ [[TMP1]], [[WHILE]] ]
-; CHECK-NEXT: br i1 [[ARG:%.*]], label [[WHILE_END:%.*]], label [[WHILE]]
-; CHECK: while.end:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test6(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[ARRAYIDX1_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 1
+; MDEP-NEXT: [[DOTPRE:%.*]] = load i32, ptr [[ARRAYIDX1_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[WHILE:%.*]]
+; MDEP: while:
+; MDEP-NEXT: [[TMP0:%.*]] = phi i32 [ [[DOTPRE]], [[ENTRY:%.*]] ], [ [[TMP2:%.*]], [[IF_END:%.*]] ]
+; MDEP-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY]] ], [ [[I_NEXT:%.*]], [[IF_END]] ]
+; MDEP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I]]
+; MDEP-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
+; MDEP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]]
+; MDEP-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; MDEP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]]
+; MDEP-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
+; MDEP: if.then:
+; MDEP-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4
+; MDEP-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: [[TMP2]] = phi i32 [ [[TMP0]], [[IF_THEN]] ], [ [[TMP1]], [[WHILE]] ]
+; MDEP-NEXT: br i1 [[ARG:%.*]], label [[WHILE_END:%.*]], label [[WHILE]]
+; MDEP: while.end:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test6(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br label [[WHILE:%.*]]
+; MSSA: while:
+; MSSA-NEXT: [[I:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[IF_END:%.*]] ]
+; MSSA-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[I]]
+; MSSA-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
+; MSSA-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1
+; MSSA-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[I_NEXT]]
+; MSSA-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; MSSA-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP0]], [[TMP1]]
+; MSSA-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]]
+; MSSA: if.then:
+; MSSA-NEXT: store i32 [[TMP1]], ptr [[ARRAYIDX1]], align 4
+; MSSA-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX2]], align 4
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: br i1 [[ARG:%.*]], label [[WHILE_END:%.*]], label [[WHILE]]
+; MSSA: while.end:
+; MSSA-NEXT: ret void
;
entry:
br label %while
@@ -304,24 +350,40 @@ while.end:
; Load from arrayidx2 is partially redundant; check that address translation can
; fold the sext and trunc together across the phi node.
define i32 @test7(ptr noalias %ptr1, ptr noalias %ptr2, i32 %i, i1 %cond) {
-; CHECK-LABEL: @test7(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[ENTRY_IF_END_CRIT_EDGE:%.*]]
-; CHECK: entry.if.end_crit_edge:
-; CHECK-NEXT: [[RES_PRE:%.*]] = load i32, ptr [[PTR1:%.*]], align 4
-; CHECK-NEXT: br label [[IF_END:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[I:%.*]]
-; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4
-; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PRE]], [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[TMP]], [[IF_THEN]] ]
-; CHECK-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[IDX_EXT]], [[IF_THEN]] ]
-; CHECK-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]]
-; CHECK-NEXT: ret i32 [[RES]]
+; MDEP-LABEL: @test7(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[ENTRY_IF_END_CRIT_EDGE:%.*]]
+; MDEP: entry.if.end_crit_edge:
+; MDEP-NEXT: [[RES_PRE:%.*]] = load i32, ptr [[PTR1:%.*]], align 4
+; MDEP-NEXT: br label [[IF_END:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[I:%.*]]
+; MDEP-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; MDEP-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4
+; MDEP-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: [[RES:%.*]] = phi i32 [ [[RES_PRE]], [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[TMP]], [[IF_THEN]] ]
+; MDEP-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY_IF_END_CRIT_EDGE]] ], [ [[IDX_EXT]], [[IF_THEN]] ]
+; MDEP-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32
+; MDEP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]]
+; MDEP-NEXT: ret i32 [[RES]]
+;
+; MSSA-LABEL: @test7(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[PTR1:%.*]], i32 [[I:%.*]]
+; MSSA-NEXT: [[TMP:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; MSSA-NEXT: store i32 [[TMP]], ptr [[PTR2:%.*]], align 4
+; MSSA-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I]] to i64
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: [[IDX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IDX_EXT]], [[IF_THEN]] ]
+; MSSA-NEXT: [[IDX_TRUNC:%.*]] = trunc i64 [[IDX]] to i32
+; MSSA-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i32 [[IDX_TRUNC]]
+; MSSA-NEXT: [[RES:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; MSSA-NEXT: ret i32 [[RES]]
;
entry:
br i1 %cond, label %if.then, label %if.end
diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll b/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll
index ea43307..cb05a8e 100644
--- a/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll
+++ b/llvm/test/Transforms/GVN/PRE/phi-translate-add.ll
@@ -1,21 +1,35 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=gvn -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON
-; RUN: opt -passes=gvn -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF
+; RUN: opt -passes=gvn -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON --check-prefixes=CHECK,PT-ON-MDEP
+; RUN: opt -passes='gvn<memoryssa>' -gvn-add-phi-translation=true -S < %s | FileCheck %s --check-prefix=ADD-TRANS-ON --check-prefixes=CHECK,PT-ON-MSSA
+; RUN: opt -passes=gvn -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF --check-prefixes=CHECK,PT-OFF-MDEP
+; RUN: opt -passes='gvn<memoryssa>' -gvn-add-phi-translation=false -S < %s | FileCheck %s --check-prefix=ADD-TRANS-OFF --check-prefixes=CHECK,PT-OFF-MSSA
; Test that phi translation is able to hoist a load whose address
; depends on an add also being hoisted.
define double @phi_translation_hoists_add(ptr %a, i64 %idx) {
-; ADD-TRANS-ON-LABEL: @phi_translation_hoists_add(
-; ADD-TRANS-ON-NEXT: entry:
-; ADD-TRANS-ON-NEXT: [[ADD_PHI_TRANS_INSERT:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1
-; ADD-TRANS-ON-NEXT: [[GEP_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD_PHI_TRANS_INSERT]]
-; ADD-TRANS-ON-NEXT: [[LOAD_PRE:%.*]] = load double, ptr [[GEP_PHI_TRANS_INSERT]], align 8
-; ADD-TRANS-ON-NEXT: br label [[FOR_BODY:%.*]]
-; ADD-TRANS-ON: for.body:
-; ADD-TRANS-ON-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD_PRE]], 1.000000e+00
-; ADD-TRANS-ON-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
-; ADD-TRANS-ON: exit:
-; ADD-TRANS-ON-NEXT: ret double [[LOAD_PRE]]
+; PT-ON-MDEP-LABEL: @phi_translation_hoists_add(
+; PT-ON-MDEP-NEXT: entry:
+; PT-ON-MDEP-NEXT: [[ADD_PHI_TRANS_INSERT:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1
+; PT-ON-MDEP-NEXT: [[GEP_PHI_TRANS_INSERT:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD_PHI_TRANS_INSERT]]
+; PT-ON-MDEP-NEXT: [[LOAD_PRE:%.*]] = load double, ptr [[GEP_PHI_TRANS_INSERT]], align 8
+; PT-ON-MDEP-NEXT: br label [[FOR_BODY:%.*]]
+; PT-ON-MDEP: for.body:
+; PT-ON-MDEP-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD_PRE]], 1.000000e+00
+; PT-ON-MDEP-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
+; PT-ON-MDEP: exit:
+; PT-ON-MDEP-NEXT: ret double [[LOAD_PRE]]
+;
+; PT-ON-MSSA-LABEL: @phi_translation_hoists_add(
+; PT-ON-MSSA-NEXT: entry:
+; PT-ON-MSSA-NEXT: br label [[FOR_BODY:%.*]]
+; PT-ON-MSSA: for.body:
+; PT-ON-MSSA-NEXT: [[ADD:%.*]] = add nuw nsw i64 [[IDX:%.*]], 1
+; PT-ON-MSSA-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[ADD]]
+; PT-ON-MSSA-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8
+; PT-ON-MSSA-NEXT: [[CMP:%.*]] = fcmp ole double [[LOAD]], 1.000000e+00
+; PT-ON-MSSA-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
+; PT-ON-MSSA: exit:
+; PT-ON-MSSA-NEXT: ret double [[LOAD]]
;
; ADD-TRANS-OFF-LABEL: @phi_translation_hoists_add(
; ADD-TRANS-OFF-NEXT: entry:
@@ -42,3 +56,8 @@ for.body: ; preds = %for.body, %entry
exit:
ret double %load
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; ADD-TRANS-ON: {{.*}}
+; CHECK: {{.*}}
+; PT-OFF-MDEP: {{.*}}
+; PT-OFF-MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/phi-translate.ll b/llvm/test/Transforms/GVN/PRE/phi-translate.ll
index 713f012..084c449 100644
--- a/llvm/test/Transforms/GVN/PRE/phi-translate.ll
+++ b/llvm/test/Transforms/GVN/PRE/phi-translate.ll
@@ -1,23 +1,53 @@
-; RUN: opt -passes=gvn -S < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-p:64:64:64"
-; CHECK-LABEL: @foo(
-; CHECK: entry.end_crit_edge:
-; CHECK: %[[INDEX:[a-z0-9.]+]] = sext i32 %x to i64{{$}}
-; CHECK: %[[ADDRESS:[a-z0-9.]+]] = getelementptr [100 x i32], ptr @G, i64 0, i64 %[[INDEX]]{{$}}
-; CHECK: %n.pre = load i32, ptr %[[ADDRESS]], align 4, !dbg [[N_LOC:![0-9]+]]
-; CHECK: br label %end
-; CHECK: then:
-; CHECK: store i32 %z
-; CHECK: end:
-; CHECK: %n = phi i32 [ %n.pre, %entry.end_crit_edge ], [ %z, %then ], !dbg [[N_LOC]]
-; CHECK: ret i32 %n
-; CHECK: [[N_LOC]] = !DILocation(line: 47, column: 1, scope: !{{.*}})
@G = external global [100 x i32]
define i32 @foo(i32 %x, i32 %z) !dbg !6 {
+; MDEP-LABEL: define i32 @foo(
+; MDEP-SAME: i32 [[X:%.*]], i32 [[Z:%.*]]) !dbg [[DBG5:![0-9]+]] {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[Z]], 0, !dbg [[DBG8:![0-9]+]]
+; MDEP-NEXT: br i1 [[TOBOOL]], label %[[ENTRY_END_CRIT_EDGE:.*]], label %[[THEN:.*]], !dbg [[DBG8]]
+; MDEP: [[ENTRY_END_CRIT_EDGE]]:
+; MDEP-NEXT: [[J_PHI_TRANS_INSERT:%.*]] = sext i32 [[X]] to i64
+; MDEP-NEXT: [[Q_PHI_TRANS_INSERT:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PHI_TRANS_INSERT]]
+; MDEP-NEXT: [[N_PRE:%.*]] = load i32, ptr [[Q_PHI_TRANS_INSERT]], align 4, !dbg [[DBG9:![0-9]+]]
+; MDEP-NEXT: br label %[[END:.*]], !dbg [[DBG8]]
+; MDEP: [[THEN]]:
+; MDEP-NEXT: [[I:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG10:![0-9]+]]
+; MDEP-NEXT: [[P:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[I]], !dbg [[DBG10]]
+; MDEP-NEXT: store i32 [[Z]], ptr [[P]], align 4, !dbg [[DBG10]]
+; MDEP-NEXT: br label %[[END]], !dbg [[DBG10]]
+; MDEP: [[END]]:
+; MDEP-NEXT: [[J_PRE_PHI:%.*]] = phi i64 [ [[J_PHI_TRANS_INSERT]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[I]], %[[THEN]] ], !dbg [[DBG11:![0-9]+]]
+; MDEP-NEXT: [[N:%.*]] = phi i32 [ [[N_PRE]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[Z]], %[[THEN]] ], !dbg [[DBG9]]
+; MDEP-NEXT: [[Q:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PRE_PHI]], !dbg [[DBG12:![0-9]+]]
+; MDEP-NEXT: ret i32 [[N]], !dbg [[DBG9]]
+;
+; MSSA-LABEL: define i32 @foo(
+; MSSA-SAME: i32 [[X:%.*]], i32 [[Z:%.*]]) !dbg [[DBG5:![0-9]+]] {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[Z]], 0, !dbg [[DBG8:![0-9]+]]
+; MSSA-NEXT: br i1 [[TOBOOL]], label %[[ENTRY_END_CRIT_EDGE:.*]], label %[[THEN:.*]], !dbg [[DBG8]]
+; MSSA: [[ENTRY_END_CRIT_EDGE]]:
+; MSSA-NEXT: [[DOTPRE:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG9:![0-9]+]]
+; MSSA-NEXT: br label %[[END:.*]], !dbg [[DBG8]]
+; MSSA: [[THEN]]:
+; MSSA-NEXT: [[I:%.*]] = sext i32 [[X]] to i64, !dbg [[DBG10:![0-9]+]]
+; MSSA-NEXT: [[P:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[I]], !dbg [[DBG10]]
+; MSSA-NEXT: store i32 [[Z]], ptr [[P]], align 4, !dbg [[DBG10]]
+; MSSA-NEXT: br label %[[END]], !dbg [[DBG10]]
+; MSSA: [[END]]:
+; MSSA-NEXT: [[J_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], %[[ENTRY_END_CRIT_EDGE]] ], [ [[I]], %[[THEN]] ], !dbg [[DBG9]]
+; MSSA-NEXT: [[Q:%.*]] = getelementptr [100 x i32], ptr @G, i64 0, i64 [[J_PRE_PHI]], !dbg [[DBG11:![0-9]+]]
+; MSSA-NEXT: [[N:%.*]] = load i32, ptr [[Q]], align 4, !dbg [[DBG12:![0-9]+]]
+; MSSA-NEXT: ret i32 [[N]], !dbg [[DBG12]]
+;
entry:
%tobool = icmp eq i32 %z, 0, !dbg !7
br i1 %tobool, label %end, label %then, !dbg !7
@@ -51,6 +81,31 @@ end:
!10 = !DILocation(line: 46, column: 1, scope: !6)
!11 = !DILocation(line: 47, column: 1, scope: !6)
!12 = distinct !DICompileUnit(language: DW_LANG_C99, producer: "clang",
- file: !5,
- isOptimized: true, flags: "-O2",
- splitDebugFilename: "abc.debug", emissionKind: 2)
+ file: !5,
+ isOptimized: true, flags: "-O2",
+ splitDebugFilename: "abc.debug", emissionKind: 2)
+;.
+; MDEP: [[META3:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C99, file: [[META4:![0-9]+]], producer: "clang", isOptimized: true, flags: "-O2", runtimeVersion: 0, splitDebugFilename: "abc.debug", emissionKind: LineTablesOnly)
+; MDEP: [[META4]] = !DIFile(filename: "{{.*}}a.cc", directory: {{.*}})
+; MDEP: [[DBG5]] = distinct !DISubprogram(name: "foo", scope: [[META4]], file: [[META4]], line: 42, type: [[META6:![0-9]+]], scopeLine: 43, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META3]], retainedNodes: [[META7:![0-9]+]])
+; MDEP: [[META6]] = !DISubroutineType(types: [[META7]])
+; MDEP: [[META7]] = !{}
+; MDEP: [[DBG8]] = !DILocation(line: 43, column: 1, scope: [[DBG5]])
+; MDEP: [[DBG9]] = !DILocation(line: 47, column: 1, scope: [[DBG5]])
+; MDEP: [[DBG10]] = !DILocation(line: 44, column: 1, scope: [[DBG5]])
+; MDEP: [[DBG11]] = !DILocation(line: 45, column: 1, scope: [[DBG5]])
+; MDEP: [[DBG12]] = !DILocation(line: 46, column: 1, scope: [[DBG5]])
+;.
+; MSSA: [[META3:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C99, file: [[META4:![0-9]+]], producer: "clang", isOptimized: true, flags: "-O2", runtimeVersion: 0, splitDebugFilename: "abc.debug", emissionKind: LineTablesOnly)
+; MSSA: [[META4]] = !DIFile(filename: "{{.*}}a.cc", directory: {{.*}})
+; MSSA: [[DBG5]] = distinct !DISubprogram(name: "foo", scope: [[META4]], file: [[META4]], line: 42, type: [[META6:![0-9]+]], scopeLine: 43, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: [[META3]], retainedNodes: [[META7:![0-9]+]])
+; MSSA: [[META6]] = !DISubroutineType(types: [[META7]])
+; MSSA: [[META7]] = !{}
+; MSSA: [[DBG8]] = !DILocation(line: 43, column: 1, scope: [[DBG5]])
+; MSSA: [[DBG9]] = !DILocation(line: 45, column: 1, scope: [[DBG5]])
+; MSSA: [[DBG10]] = !DILocation(line: 44, column: 1, scope: [[DBG5]])
+; MSSA: [[DBG11]] = !DILocation(line: 46, column: 1, scope: [[DBG5]])
+; MSSA: [[DBG12]] = !DILocation(line: 47, column: 1, scope: [[DBG5]])
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll b/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll
index 9ca3e1b..60611a0 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-aliasning-path.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s
+; RUN: opt -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -enable-load-pre -enable-pre -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
declare void @side_effect_0() nofree
@@ -102,25 +103,45 @@ exit:
}
define i32 @test_03(ptr %p) {
-; CHECK-LABEL: @test_03(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[X_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ]
-; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[X_PRE]], 100
-; CHECK-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]]
-; CHECK: hot_path:
-; CHECK-NEXT: br label [[BACKEDGE]]
-; CHECK: cold_path:
-; CHECK-NEXT: call void @no_side_effect()
-; CHECK-NEXT: br label [[BACKEDGE]]
-; CHECK: backedge:
-; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X_PRE]]
-; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
-; CHECK-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]]
-; CHECK: exit:
-; CHECK-NEXT: ret i32 [[X_PRE]]
+; MDEP-LABEL: @test_03(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[X_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
+; MDEP-NEXT: br label [[LOOP:%.*]]
+; MDEP: loop:
+; MDEP-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ]
+; MDEP-NEXT: [[COND:%.*]] = icmp ult i32 [[X_PRE]], 100
+; MDEP-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]]
+; MDEP: hot_path:
+; MDEP-NEXT: br label [[BACKEDGE]]
+; MDEP: cold_path:
+; MDEP-NEXT: call void @no_side_effect()
+; MDEP-NEXT: br label [[BACKEDGE]]
+; MDEP: backedge:
+; MDEP-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X_PRE]]
+; MDEP-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
+; MDEP-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]]
+; MDEP: exit:
+; MDEP-NEXT: ret i32 [[X_PRE]]
+;
+; MSSA-LABEL: @test_03(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br label [[LOOP:%.*]]
+; MSSA: loop:
+; MSSA-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[BACKEDGE:%.*]] ]
+; MSSA-NEXT: [[X:%.*]] = load i32, ptr [[P:%.*]], align 4
+; MSSA-NEXT: [[COND:%.*]] = icmp ult i32 [[X]], 100
+; MSSA-NEXT: br i1 [[COND]], label [[HOT_PATH:%.*]], label [[COLD_PATH:%.*]]
+; MSSA: hot_path:
+; MSSA-NEXT: br label [[BACKEDGE]]
+; MSSA: cold_path:
+; MSSA-NEXT: call void @no_side_effect()
+; MSSA-NEXT: br label [[BACKEDGE]]
+; MSSA: backedge:
+; MSSA-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]]
+; MSSA-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
+; MSSA-NEXT: br i1 [[LOOP_COND]], label [[LOOP]], label [[EXIT:%.*]]
+; MSSA: exit:
+; MSSA-NEXT: ret i32 [[X]]
;
entry:
br label %loop
diff --git a/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll b/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll
index f099ddc..9bf6496 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-basic-add.ll
@@ -1,33 +1,53 @@
-; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
; RUN: opt < %s -passes="gvn<pre>" -enable-pre=false -S | FileCheck %s
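+;; The third RUN line is expected to keep matching the shared CHECK lines: the
+;; "gvn<pre>" pass parameter should re-enable PRE even though -enable-pre=false
+;; turns the global default off.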
@H = common global i32 0 ; <ptr> [#uses=2]
@G = common global i32 0 ; <ptr> [#uses=1]
define i32 @test() nounwind {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @H, align 4
+; CHECK-NEXT: [[TMP1:%.*]] = call i32 (...) @foo() #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[BB:.*]], label %[[ENTRY_BB1_CRIT_EDGE:.*]]
+; CHECK: [[ENTRY_BB1_CRIT_EDGE]]:
+; CHECK-NEXT: [[DOTPRE:%.*]] = add i32 [[TMP0]], 42
+; CHECK-NEXT: br label %[[BB1:.*]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP0]], 42
+; CHECK-NEXT: store i32 [[TMP3]], ptr @G, align 4
+; CHECK-NEXT: br label %[[BB1]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[DOTPRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[ENTRY_BB1_CRIT_EDGE]] ], [ [[TMP3]], %[[BB]] ]
+; CHECK-NEXT: store i32 [[DOTPRE_PHI]], ptr @H, align 4
+; CHECK-NEXT: ret i32 0
+;
entry:
- %0 = load i32, ptr @H, align 4 ; <i32> [#uses=2]
- %1 = call i32 (...) @foo() nounwind ; <i32> [#uses=1]
- %2 = icmp ne i32 %1, 0 ; <i1> [#uses=1]
- br i1 %2, label %bb, label %bb1
+ %0 = load i32, ptr @H, align 4 ; <i32> [#uses=2]
+ %1 = call i32 (...) @foo() nounwind ; <i32> [#uses=1]
+ %2 = icmp ne i32 %1, 0 ; <i1> [#uses=1]
+ br i1 %2, label %bb, label %bb1
bb: ; preds = %entry
- %3 = add i32 %0, 42 ; <i32> [#uses=1]
-; CHECK: %.pre = add i32 %0, 42
- store i32 %3, ptr @G, align 4
- br label %bb1
+ %3 = add i32 %0, 42 ; <i32> [#uses=1]
+ store i32 %3, ptr @G, align 4
+ br label %bb1
bb1: ; preds = %bb, %entry
- %4 = add i32 %0, 42 ; <i32> [#uses=1]
- store i32 %4, ptr @H, align 4
- br label %return
+ %4 = add i32 %0, 42 ; <i32> [#uses=1]
+ store i32 %4, ptr @H, align 4
+ br label %return
-; CHECK: %.pre-phi = phi i32 [ %.pre, %entry.bb1_crit_edge ], [ %3, %bb ]
-; CHECK-NEXT: store i32 %.pre-phi, ptr @H, align 4
-; CHECK-NEXT: ret i32 0
return: ; preds = %bb1
- ret i32 0
+ ret i32 0
}
declare i32 @foo(...)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll b/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll
index 95f8f3f..f62d06d 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-jt-add.ll
@@ -1,16 +1,33 @@
-; RUN: opt < %s -passes=gvn,jump-threading -enable-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn,jump-threading -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>',jump-threading -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
@H = common global i32 0
@G = common global i32 0
define i32 @test(i1 %cond, i32 %v) nounwind {
-; CHECK-LABEL: @test
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[MERGE:.*]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: store i32 -1, ptr @G, align 4
+; CHECK-NEXT: br label %[[MERGE]]
+; CHECK: [[MERGE]]:
+; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[V]], -1
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[ADD_2]], 0
+; CHECK-NEXT: br i1 [[CMP]], label %[[ACTION:.*]], label %[[RETURN:.*]]
+; CHECK: [[ACTION]]:
+; CHECK-NEXT: store i32 [[ADD_2]], ptr @H, align 4
+; CHECK-NEXT: br label %[[RETURN]]
+; CHECK: [[RETURN]]:
+; CHECK-NEXT: [[P:%.*]] = phi i32 [ 0, %[[MERGE]] ], [ 1, %[[ACTION]] ]
+; CHECK-NEXT: ret i32 [[P]]
+;
entry:
br i1 %cond, label %bb, label %bb1
bb:
-; CHECK: store
-; CHECK-NOT: br label %return
%add.1 = add nuw nsw i32 %v, -1
store i32 %add.1, ptr @G, align 4
br label %merge
@@ -24,8 +41,6 @@ merge:
br i1 %cmp, label %action, label %return
action:
-; CHECK: store
-; CHECK-NEXT: br label %return
store i32 %add.2, ptr @H, align 4
br label %return
@@ -34,3 +49,6 @@ return:
ret i32 %p
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll b/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll
index 8c020fd..f961f23 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load-dbg.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -passes=gvn -gvn-max-num-insns=22 -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -gvn-max-num-insns=22 -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -gvn-max-num-insns=22 -S | FileCheck %s --check-prefixes=CHECK,MSSA
; Debug information should not impact GVN. The following two functions have the same
; code except for debug information. They should generate the same optimized
@@ -11,13 +13,80 @@
@h = global %struct.a zeroinitializer, align 1
define void @withdbg() {
-; CHECK-LABEL: @withdbg
-; CHECK: [[PRE_PRE1:%.*]] = load i16, ptr @f, align 1
-; CHECK-NEXT: [[PRE_PRE2:%.*]] = load ptr, ptr @m, align 1
-; CHECK-NEXT: br i1 true, label %[[BLOCK1:.*]], label %[[BLOCK2:.*]]
-; CHECK: [[BLOCK1]]:
-; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[PRE_PRE1]] to i32
-; CHECK-NEXT: store i32 [[CONV]], ptr [[PRE_PRE2]], align 1
+; MDEP-LABEL: define void @withdbg() {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1
+; MDEP-NEXT: [[TMP11_PRE:%.*]] = load i16, ptr @f, align 1
+; MDEP-NEXT: [[TMP12_PRE:%.*]] = load ptr, ptr @m, align 1
+; MDEP-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]]
+; MDEP: [[LOR_RHS]]:
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META4:![0-9]+]], !DIExpression(), [[META14:![0-9]+]])
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META10:![0-9]+]], !DIExpression(), [[META14]])
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META11:![0-9]+]], !DIExpression(), [[META14]])
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META12:![0-9]+]], !DIExpression(), [[META14]])
+; MDEP-NEXT: #dbg_declare(ptr undef, [[META13:![0-9]+]], !DIExpression(), [[META14]])
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: br label %[[LOR_END]]
+; MDEP: [[LOR_END]]:
+; MDEP-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11_PRE]] to i32
+; MDEP-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12_PRE]], align 1
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: define void @withdbg() {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1
+; MSSA-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]]
+; MSSA: [[LOR_RHS]]:
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META4:![0-9]+]], !DIExpression(), [[META14:![0-9]+]])
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META10:![0-9]+]], !DIExpression(), [[META14]])
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META11:![0-9]+]], !DIExpression(), [[META14]])
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META12:![0-9]+]], !DIExpression(), [[META14]])
+; MSSA-NEXT: #dbg_declare(ptr undef, [[META13:![0-9]+]], !DIExpression(), [[META14]])
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[FVALUE:%.*]] = load i16, ptr @f, align 1
+; MSSA-NEXT: [[MVALUE:%.*]] = load ptr, ptr @m, align 1
+; MSSA-NEXT: br label %[[LOR_END]]
+; MSSA: [[LOR_END]]:
+; MSSA-NEXT: [[TMP11:%.*]] = load i16, ptr @f, align 1
+; MSSA-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11]] to i32
+; MSSA-NEXT: [[TMP12:%.*]] = load ptr, ptr @m, align 1
+; MSSA-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12]], align 1
+; MSSA-NEXT: ret void
+;
entry:
%agg.tmp.ensured.sroa.0.i = alloca i16, align 1
@@ -61,13 +130,70 @@ lor.end: ; preds = %lor.rhs, %entry
}
define void @lessdbg() {
-; CHECK-LABEL: @lessdbg
-; CHECK: [[PRE_PRE1:%.*]] = load i16, ptr @f, align 1
-; CHECK-NEXT: [[PRE_PRE2:%.*]] = load ptr, ptr @m, align 1
-; CHECK-NEXT: br i1 true, label %[[BLOCK1:.*]], label %[[BLOCK2:.*]]
-; CHECK: [[BLOCK1]]:
-; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[PRE_PRE1]] to i32
-; CHECK-NEXT: store i32 [[CONV]], ptr [[PRE_PRE2]], align 1
+; MDEP-LABEL: define void @lessdbg() {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1
+; MDEP-NEXT: [[TMP11_PRE:%.*]] = load i16, ptr @f, align 1
+; MDEP-NEXT: [[TMP12_PRE:%.*]] = load ptr, ptr @m, align 1
+; MDEP-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]]
+; MDEP: [[LOR_RHS]]:
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1
+; MDEP-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MDEP-NEXT: br label %[[LOR_END]]
+; MDEP: [[LOR_END]]:
+; MDEP-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11_PRE]] to i32
+; MDEP-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12_PRE]], align 1
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: define void @lessdbg() {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_I:%.*]] = alloca i16, align 1
+; MSSA-NEXT: br i1 true, label %[[LOR_END:.*]], label %[[LOR_RHS:.*]]
+; MSSA: [[LOR_RHS]]:
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_1_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_2_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_3_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_4_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_5_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_6_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_7_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I:%.*]] = load volatile i16, ptr @h, align 1
+; MSSA-NEXT: store i16 [[AGG_TMP_ENSURED_SROA_0_0_COPYLOAD_8_I]], ptr [[AGG_TMP_ENSURED_SROA_0_I]], align 1
+; MSSA-NEXT: [[FVALUE:%.*]] = load i16, ptr @f, align 1
+; MSSA-NEXT: [[MVALUE:%.*]] = load ptr, ptr @m, align 1
+; MSSA-NEXT: br label %[[LOR_END]]
+; MSSA: [[LOR_END]]:
+; MSSA-NEXT: [[TMP11:%.*]] = load i16, ptr @f, align 1
+; MSSA-NEXT: [[CONV_I_I6:%.*]] = sext i16 [[TMP11]] to i32
+; MSSA-NEXT: [[TMP12:%.*]] = load ptr, ptr @m, align 1
+; MSSA-NEXT: store i32 [[CONV_I_I6]], ptr [[TMP12]], align 1
+; MSSA-NEXT: ret void
+;
entry:
%agg.tmp.ensured.sroa.0.i = alloca i16, align 1
@@ -126,3 +252,34 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!48 = !DILocalVariable(name: "v", scope: !41, file: !1, line: 15, type: !5)
!49 = !DILocalVariable(name: "d", scope: !41, file: !1, line: 15, type: !5)
!50 = !DILocalVariable(name: "u", scope: !41, file: !1, line: 16, type: !5)
+;.
+; MDEP: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+; MDEP: [[META1]] = !DIFile(filename: "{{.*}}bbi-78272.c", directory: {{.*}})
+; MDEP: [[META4]] = !DILocalVariable(name: "t", scope: [[META5:![0-9]+]], file: [[META1]], line: 15, type: [[META8:![0-9]+]])
+; MDEP: [[META5]] = distinct !DISubprogram(name: "x", scope: [[META1]], file: [[META1]], line: 14, type: [[META6:![0-9]+]], scopeLine: 14, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META9:![0-9]+]])
+; MDEP: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]])
+; MDEP: [[META7]] = !{[[META8]]}
+; MDEP: [[META8]] = !DIBasicType(name: "int", size: 16, encoding: DW_ATE_signed)
+; MDEP: [[META9]] = !{[[META4]], [[META10]], [[META11]], [[META12]], [[META13]]}
+; MDEP: [[META10]] = !DILocalVariable(name: "c", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MDEP: [[META11]] = !DILocalVariable(name: "v", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MDEP: [[META12]] = !DILocalVariable(name: "d", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MDEP: [[META13]] = !DILocalVariable(name: "u", scope: [[META5]], file: [[META1]], line: 16, type: [[META8]])
+; MDEP: [[META14]] = !DILocation(line: 15, column: 7, scope: [[META5]])
+;.
+; MSSA: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C11, file: [[META1:![0-9]+]], producer: "{{.*}}clang version {{.*}}", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+; MSSA: [[META1]] = !DIFile(filename: "{{.*}}bbi-78272.c", directory: {{.*}})
+; MSSA: [[META4]] = !DILocalVariable(name: "t", scope: [[META5:![0-9]+]], file: [[META1]], line: 15, type: [[META8:![0-9]+]])
+; MSSA: [[META5]] = distinct !DISubprogram(name: "x", scope: [[META1]], file: [[META1]], line: 14, type: [[META6:![0-9]+]], scopeLine: 14, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]], retainedNodes: [[META9:![0-9]+]])
+; MSSA: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]])
+; MSSA: [[META7]] = !{[[META8]]}
+; MSSA: [[META8]] = !DIBasicType(name: "int", size: 16, encoding: DW_ATE_signed)
+; MSSA: [[META9]] = !{[[META4]], [[META10]], [[META11]], [[META12]], [[META13]]}
+; MSSA: [[META10]] = !DILocalVariable(name: "c", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MSSA: [[META11]] = !DILocalVariable(name: "v", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MSSA: [[META12]] = !DILocalVariable(name: "d", scope: [[META5]], file: [[META1]], line: 15, type: [[META8]])
+; MSSA: [[META13]] = !DILocalVariable(name: "u", scope: [[META5]], file: [[META1]], line: 16, type: [[META8]])
+; MSSA: [[META14]] = !DILocation(line: 15, column: 7, scope: [[META5]])
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll b/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll
index 1ca907d..ca1852f 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load-guards.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
declare void @llvm.experimental.guard(i1, ...)
@@ -8,20 +10,33 @@ declare void @llvm.experimental.guard(i1, ...)
; the element in this case and deoptimize otherwise. If we hoist the load to a
; place above the guard, it may lead to out-of-bounds array access.
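+; A minimal sketch of the guarded pattern (using the names from the test
+; below): @llvm.experimental.guard deoptimizes when its condition is false,
+; so the load is only known to be in bounds once the guard has passed:
+;   %in.bounds = and i1 %cond1, %cond2
+;   call void (i1, ...) @llvm.experimental.guard(i1 %in.bounds) [ "deopt"() ]
+;   %PRE = load i32, ptr %P2
+; Hoisting the load above the guard call would drop that protection.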
define i32 @test_motivation(ptr %p, ptr %q, i1 %C, i32 %index, i32 %len) {
-; CHECK-LABEL: @test_motivation(
+; CHECK-LABEL: define i32 @test_motivation(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i32 [[INDEX:%.*]], i32 [[LEN:%.*]]) {
+; CHECK-NEXT: [[BLOCK1:.*:]]
+; CHECK-NEXT: [[EL1:%.*]] = getelementptr inbounds i32, ptr [[Q]], i32 [[INDEX]]
+; CHECK-NEXT: [[EL2:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 [[INDEX]]
+; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK4:.*]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: store i32 0, ptr [[EL1]], align 4
+; CHECK-NEXT: br label %[[BLOCK4]]
+; CHECK: [[BLOCK4]]:
+; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[EL2]], %[[BLOCK3]] ], [ [[EL1]], %[[BLOCK2]] ]
+; CHECK-NEXT: [[COND1:%.*]] = icmp sge i32 [[INDEX]], 0
+; CHECK-NEXT: [[COND2:%.*]] = icmp slt i32 [[INDEX]], [[LEN]]
+; CHECK-NEXT: [[IN_BOUNDS:%.*]] = and i1 [[COND1]], [[COND2]]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[IN_BOUNDS]]) [ "deopt"() ]
+; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; CHECK-NEXT: ret i32 [[PRE]]
+;
block1:
%el1 = getelementptr inbounds i32, ptr %q, i32 %index
%el2 = getelementptr inbounds i32, ptr %p, i32 %index
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
-; CHECK: block2:
-; CHECK-NEXT: br
-; CHECK-NOT: load
-; CHECK-NOT: sge
-; CHECK-NOT: slt
-; CHECK-NOT: and
br label %block4
block3:
@@ -30,13 +45,6 @@ block3:
block4:
-; CHECK: block4:
-; CHECK: %cond1 = icmp sge i32 %index, 0
-; CHECK-NEXT: %cond2 = icmp slt i32 %index, %len
-; CHECK-NEXT: %in.bounds = and i1 %cond1, %cond2
-; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 %in.bounds)
-; CHECK-NEXT: %PRE = load i32, ptr %P2
-; CHECK: ret i32 %PRE
%P2 = phi ptr [%el2, %block3], [%el1, %block2]
%cond1 = icmp sge i32 %index, 0
@@ -49,17 +57,28 @@ block4:
; A guard in the load's block that is above the load should prohibit the PRE.
define i32 @test_guard_01(ptr %p, ptr %q, i1 %C, i1 %G) {
-; CHECK-LABEL: @test_guard_01(
+; CHECK-LABEL: define i32 @test_guard_01(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) {
+; CHECK-NEXT: [[BLOCK1:.*:]]
+; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK4:.*]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: store i32 0, ptr [[P]], align 4
+; CHECK-NEXT: br label %[[BLOCK4]]
+; CHECK: [[BLOCK4]]:
+; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ]
+; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; CHECK-NEXT: ret i32 [[PRE]]
+;
block1:
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
-; CHECK: block2:
-; CHECK-NEXT: br
-; CHECK-NOT: load
- br label %block4
+ br label %block4
block3:
store i32 0, ptr %p
@@ -67,10 +86,6 @@ block3:
block4:
-; CHECK: block4:
-; CHECK: call void (i1, ...) @llvm.experimental.guard(i1 %G)
-; CHECK-NEXT: load
-; CHECK: ret i32
%P2 = phi ptr [%p, %block3], [%q, %block2]
call void (i1, ...) @llvm.experimental.guard(i1 %G) [ "deopt"() ]
@@ -80,16 +95,44 @@ block4:
; A guard in the load's block that is below the load should not prohibit the PRE.
define i32 @test_guard_02(ptr %p, ptr %q, i1 %C, i1 %G) {
-; CHECK-LABEL: @test_guard_02(
+; MDEP-LABEL: define i32 @test_guard_02(
+; MDEP-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) {
+; MDEP-NEXT: [[BLOCK1:.*:]]
+; MDEP-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; MDEP: [[BLOCK2]]:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q]], align 4
+; MDEP-NEXT: br label %[[BLOCK4:.*]]
+; MDEP: [[BLOCK3]]:
+; MDEP-NEXT: store i32 0, ptr [[P]], align 4
+; MDEP-NEXT: br label %[[BLOCK4]]
+; MDEP: [[BLOCK4]]:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, %[[BLOCK3]] ], [ [[PRE_PRE]], %[[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ]
+; MDEP-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ]
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: define i32 @test_guard_02(
+; MSSA-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) {
+; MSSA-NEXT: [[BLOCK1:.*:]]
+; MSSA-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; MSSA: [[BLOCK2]]:
+; MSSA-NEXT: br label %[[BLOCK4:.*]]
+; MSSA: [[BLOCK3]]:
+; MSSA-NEXT: store i32 0, ptr [[P]], align 4
+; MSSA-NEXT: br label %[[BLOCK4]]
+; MSSA: [[BLOCK4]]:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ]
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; MSSA-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ]
+; MSSA-NEXT: ret i32 [[PRE]]
+;
block1:
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
-; CHECK: block2:
-; CHECK-NEXT: load i32, ptr %q
- br label %block4
+ br label %block4
block3:
store i32 0, ptr %p
@@ -97,12 +140,6 @@ block3:
block4:
-; CHECK: block4:
-; CHECK-NEXT: phi i32 [
-; CHECK-NEXT: phi ptr [
-; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %G)
-; CHECK-NOT: load
-; CHECK: ret i32
%P2 = phi ptr [%p, %block3], [%q, %block2]
%PRE = load i32, ptr %P2
@@ -112,17 +149,28 @@ block4:
; A guard above the load's block should prevent PRE from hoisting through it.
define i32 @test_guard_03(ptr %p, ptr %q, i1 %C, i1 %G) {
-; CHECK-LABEL: @test_guard_03(
+; CHECK-LABEL: define i32 @test_guard_03(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[C:%.*]], i1 [[G:%.*]]) {
+; CHECK-NEXT: [[BLOCK1:.*:]]
+; CHECK-NEXT: br i1 [[C]], label %[[BLOCK2:.*]], label %[[BLOCK3:.*]]
+; CHECK: [[BLOCK2]]:
+; CHECK-NEXT: br label %[[BLOCK4:.*]]
+; CHECK: [[BLOCK3]]:
+; CHECK-NEXT: store i32 0, ptr [[P]], align 4
+; CHECK-NEXT: br label %[[BLOCK4]]
+; CHECK: [[BLOCK4]]:
+; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], %[[BLOCK3]] ], [ [[Q]], %[[BLOCK2]] ]
+; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[G]]) [ "deopt"() ]
+; CHECK-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; CHECK-NEXT: ret i32 [[PRE]]
+;
block1:
- br i1 %C, label %block2, label %block3
+ br i1 %C, label %block2, label %block3
block2:
-; CHECK: block2:
-; CHECK-NEXT: br
-; CHECK-NOT: load
- br label %block4
+ br label %block4
block3:
store i32 0, ptr %p
@@ -130,11 +178,6 @@ block3:
block4:
-; CHECK: block4:
-; CHECK-NEXT: phi ptr
-; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 %G)
-; CHECK-NEXT: load
-; CHECK-NEXT: ret i32
%P2 = phi ptr [%p, %block3], [%q, %block2]
call void (i1, ...) @llvm.experimental.guard(i1 %G) [ "deopt"() ]
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll b/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll
index 0585781..17fbc0e 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load-implicit-cf-updates.ll
@@ -1,4 +1,6 @@
-; RUN: opt -S -passes=gvn -enable-load-pre < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=gvn -enable-load-pre < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -S -passes='gvn<memoryssa>' -enable-load-pre < %s | FileCheck %s --check-prefixes=CHECK,MSSA
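+;; For test_01 and test_03 the MSSA checks below keep the load inside the
+;; loop while the MDEP checks show it hoisted into the entry block, which is
+;; why those two functions use split prefixes; test_02 and test_04 behave the
+;; same in both configurations.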
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -9,18 +11,28 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
declare i32 @foo(i32 %arg) #0
define hidden void @test_01(i32 %x, i32 %y) {
-
; c2 only throws if c1 throws, so it can be safely removed and then PRE can
; hoist the load out of the loop.
-
-; CHECK-LABEL: @test_01
-; CHECK: entry:
-; CHECK-NEXT: %c1 = call i32 @foo(i32 %x)
-; CHECK-NEXT: %val.pre = load i32, ptr null, align 8
-; CHECK-NEXT: br label %loop
-; CHECK: loop:
-; CHECK-NEXT: %c3 = call i32 @foo(i32 %val.pre)
-; CHECK-NEXT: br label %loop
+; MDEP-LABEL: define hidden void @test_01(
+; MDEP-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; MDEP-NEXT: [[VAL_PRE:%.*]] = load i32, ptr null, align 8
+; MDEP-NEXT: br label %[[LOOP:.*]]
+; MDEP: [[LOOP]]:
+; MDEP-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL_PRE]])
+; MDEP-NEXT: br label %[[LOOP]]
+;
+; MSSA-LABEL: define hidden void @test_01(
+; MSSA-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; MSSA-NEXT: br label %[[LOOP:.*]]
+; MSSA: [[LOOP]]:
+; MSSA-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8
+; MSSA-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL]])
+; MSSA-NEXT: br label %[[LOOP]]
+;
entry:
%c1 = call i32 @foo(i32 %x)
@@ -34,18 +46,18 @@ loop:
}
define hidden void @test_02(i32 %x, i32 %y) {
-
; PRE is not allowed because c2 may throw.
-
-; CHECK-LABEL: @test_02
-; CHECK: entry:
-; CHECK-NEXT: %c1 = call i32 @foo(i32 %x)
-; CHECK-NEXT: br label %loop
-; CHECK: loop:
-; CHECK-NEXT: %c2 = call i32 @foo(i32 %y)
-; CHECK-NEXT: %val = load i32, ptr null, align 8
-; CHECK-NEXT: %c3 = call i32 @foo(i32 %val)
-; CHECK-NEXT: br label %loop
+; CHECK-LABEL: define hidden void @test_02(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[C2:%.*]] = call i32 @foo(i32 [[Y]])
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8
+; CHECK-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[VAL]])
+; CHECK-NEXT: br label %[[LOOP]]
+;
entry:
%c1 = call i32 @foo(i32 %x)
@@ -59,19 +71,31 @@ loop:
}
define hidden void @test_03(i32 %x, i32 %y) {
-
; PRE of the load is allowed because c2 only throws if c1 throws. c3 should
; not be eliminated. c4 is eliminated because it only throws if c3 throws.
-
-; CHECK-LABEL: @test_03
-; CHECK: entry:
-; CHECK-NEXT: %c1 = call i32 @foo(i32 %x)
-; CHECK-NEXT: %val.pre = load i32, ptr null, align 8
-; CHECK-NEXT: br label %loop
-; CHECK: loop:
-; CHECK-NEXT: %c3 = call i32 @foo(i32 %y)
-; CHECK-NEXT: %c5 = call i32 @foo(i32 %val.pre)
-; CHECK-NEXT: br label %loop
+; MDEP-LABEL: define hidden void @test_03(
+; MDEP-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; MDEP-NEXT: [[VAL_PRE:%.*]] = load i32, ptr null, align 8
+; MDEP-NEXT: br label %[[LOOP:.*]]
+; MDEP: [[LOOP]]:
+; MDEP-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]])
+; MDEP-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL_PRE]])
+; MDEP-NEXT: br label %[[LOOP]]
+;
+; MSSA-LABEL: define hidden void @test_03(
+; MSSA-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; MSSA-NEXT: br label %[[LOOP:.*]]
+; MSSA: [[LOOP]]:
+; MSSA-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8
+; MSSA-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]])
+; MSSA-NEXT: [[VAL2:%.*]] = load i32, ptr null, align 8
+; MSSA-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL]])
+; MSSA-NEXT: br label %[[LOOP]]
+;
entry:
%c1 = call i32 @foo(i32 %x)
@@ -88,18 +112,18 @@ loop:
}
define hidden void @test_04(i32 %x, i32 %y) {
-
; PRE is not allowed even after we remove c2, because now c3 prevents it.
-
-; CHECK-LABEL: @test_04
-; CHECK: entry:
-; CHECK-NEXT: %c1 = call i32 @foo(i32 %x)
-; CHECK-NEXT: br label %loop
-; CHECK: loop:
-; CHECK-NEXT: %c3 = call i32 @foo(i32 %y)
-; CHECK-NEXT: %val = load i32, ptr null, align 8
-; CHECK-NEXT: %c5 = call i32 @foo(i32 %val)
-; CHECK-NEXT: br label %loop
+; CHECK-LABEL: define hidden void @test_04(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[C1:%.*]] = call i32 @foo(i32 [[X]])
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[C3:%.*]] = call i32 @foo(i32 [[Y]])
+; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr null, align 8
+; CHECK-NEXT: [[C5:%.*]] = call i32 @foo(i32 [[VAL]])
+; CHECK-NEXT: br label %[[LOOP]]
+;
entry:
%c1 = call i32 @foo(i32 %x)
diff --git a/llvm/test/Transforms/GVN/PRE/pre-load.ll b/llvm/test/Transforms/GVN/PRE/pre-load.ll
index bbd20bc..5a07f9f 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-load.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-load.ll
@@ -1,21 +1,34 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s
+; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
; RUN: opt < %s -aa-pipeline=basic-aa -passes="gvn<load-pre>" -enable-load-pre=false -S | FileCheck %s
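+;; The third RUN line, with load PRE re-enabled through the "gvn<load-pre>"
+;; pass parameter, is expected to still satisfy the shared CHECK lines even
+;; though -enable-load-pre=false disables the global default.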
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
define i32 @test1(ptr %p, i1 %C) {
-; CHECK-LABEL: @test1(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: store i32 0, ptr [[P]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test1(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: store i32 0, ptr [[P]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test1(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -34,19 +47,32 @@ block4:
; This is a simple phi translation case.
define i32 @test2(ptr %p, ptr %q, i1 %C) {
-; CHECK-LABEL: @test2(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q:%.*]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: store i32 0, ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test2(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[Q:%.*]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: store i32 0, ptr [[P:%.*]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test2(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q:%.*]], [[BLOCK2]] ]
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P2]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -66,23 +92,40 @@ block4:
; This is a PRE case that requires phi translation through a GEP.
define i32 @test3(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test3(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[B]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test3(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[B]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test3(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
%B = getelementptr i32, ptr %q, i32 1
@@ -107,24 +150,41 @@ block4:
;; Here the loaded address is available, but the computation is in 'block3'
;; which does not dominate 'block2'.
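+;; (The MDEP checks below show the fix: GVN phi-translates the address into
+;; block2 and inserts a fresh GEP, [[P3_PHI_TRANS_INSERT]], so the load can
+;; still be hoisted; the MSSA variant leaves the load in block4.)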
define i32 @test4(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test4(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1
-; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test4(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1
+; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test4(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -149,24 +209,41 @@ block4:
; Same as test4, with a nuw flag on the GEP.
define i32 @test4_nuw(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test4_nuw(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr nuw i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1
-; CHECK-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test4_nuw(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr nuw i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q]], i32 1
+; MDEP-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test4_nuw(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[B:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MSSA-NEXT: store ptr [[B]], ptr [[HACK:%.*]], align 8
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr nuw i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -196,28 +273,50 @@ block4:
;}
define void @test5(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test5(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP3:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]]
-; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP3]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
-; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test5(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP3:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]]
+; MDEP-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MDEP-NEXT: [[TMP3]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test5(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP6]]
+; MSSA-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP7]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP7]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = add i32 %N, -1
@@ -254,28 +353,50 @@ return:
;}
define void @test6(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test6(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]]
-; CHECK-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
-; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test6(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[G:%.*]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP6]]
+; MDEP-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MDEP-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
+; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test6(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 0
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = zext i32 [[TMP0]] to i64
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP6]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP6]]
+; MSSA-NEXT: [[SCEVGEP7:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP7]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP6]], [[TMP]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = add i32 %N, -1
@@ -314,31 +435,57 @@ return:
; This requires phi translation of the adds.
define void @test7(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test7(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
-; CHECK-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP3:%.*]] = phi double [ 1.000000e+00, [[BB_NPH]] ], [ [[TMP5:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP5]] = fadd double [[TMP3]], [[TMP4]]
-; CHECK-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test7(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
+; MDEP-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
+; MDEP-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
+; MDEP-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP3:%.*]] = phi double [ 1.000000e+00, [[BB_NPH]] ], [ [[TMP5:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MDEP-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP5]] = fadd double [[TMP3]], [[TMP4]]
+; MDEP-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test7(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[G:%.*]], i64 1
+; MSSA-NEXT: store double 1.000000e+00, ptr [[TMP0]], align 8
+; MSSA-NEXT: [[TMP1:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], 1
+; MSSA-NEXT: br i1 [[TMP2]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP1]] to i64
+; MSSA-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MSSA-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; MSSA-NEXT: store double [[TMP5]], ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = getelementptr inbounds double, ptr %G, i64 1
@@ -374,22 +521,37 @@ return:
;; Here the loaded address isn't available in 'block2' at all, requiring a new
;; GEP to be inserted into it.
define i32 @test8(ptr %p, ptr %q, ptr %Hack, i1 %C) {
-; CHECK-LABEL: @test8(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
-; CHECK-NEXT: store i32 0, ptr [[A]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
-; CHECK-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
-; CHECK-NEXT: ret i32 [[PRE]]
+; MDEP-LABEL: @test8(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[P3_PHI_TRANS_INSERT:%.*]] = getelementptr i32, ptr [[Q:%.*]], i32 1
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P3_PHI_TRANS_INSERT]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MDEP-NEXT: store i32 0, ptr [[A]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ 0, [[BLOCK3]] ], [ [[PRE_PRE]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q]], [[BLOCK2]] ]
+; MDEP-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MDEP-NEXT: ret i32 [[PRE]]
+;
+; MSSA-LABEL: @test8(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: br label [[BLOCK4:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: [[A:%.*]] = getelementptr i32, ptr [[P:%.*]], i32 1
+; MSSA-NEXT: store i32 0, ptr [[A]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[P2:%.*]] = phi ptr [ [[P]], [[BLOCK3]] ], [ [[Q:%.*]], [[BLOCK2]] ]
+; MSSA-NEXT: [[P3:%.*]] = getelementptr i32, ptr [[P2]], i32 1
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P3]], align 4
+; MSSA-NEXT: ret i32 [[PRE]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -417,31 +579,55 @@ block4:
; This requires phi translation of the adds.
define void @test9(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test9(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: [[SCEVGEP10_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP10_PHI_TRANS_INSERT]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
-; CHECK-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
-; CHECK-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test9(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: [[SCEVGEP10_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP10_PHI_TRANS_INSERT]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP8]]
+; MDEP-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[TMP4]] = fadd double [[TMP2]], [[TMP3]]
+; MDEP-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test9(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MSSA-NEXT: [[TMP7:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP9:%.*]], [[BB]] ]
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[TMP8]]
+; MSSA-NEXT: [[TMP9]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: store double [[TMP4]], ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP9]], [[TMP7]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
add i32 0, 0
@@ -482,35 +668,62 @@ return:
; PR5501
define void @test10(i32 %N, ptr nocapture %G) nounwind ssp {
-; CHECK-LABEL: @test10(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
-; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
-; CHECK-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
-; CHECK: bb.nph:
-; CHECK-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
-; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
-; CHECK-NEXT: [[SCEVGEP12_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
-; CHECK-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP12_PHI_TRANS_INSERT]], align 8
-; CHECK-NEXT: [[DOTPRE1:%.*]] = load double, ptr [[G]], align 8
-; CHECK-NEXT: br label [[BB:%.*]]
-; CHECK: bb:
-; CHECK-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE1]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
-; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
-; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
-; CHECK-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
-; CHECK-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
-; CHECK-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
-; CHECK-NEXT: [[TMP4]] = load double, ptr [[SCEVGEP10]], align 8
-; CHECK-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[TMP6]] = fadd double [[TMP5]], [[TMP2]]
-; CHECK-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
-; CHECK: return:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test10(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MDEP-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MDEP-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MDEP: bb.nph:
+; MDEP-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MDEP-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
+; MDEP-NEXT: [[SCEVGEP12_PHI_TRANS_INSERT:%.*]] = getelementptr double, ptr [[G:%.*]], i64 1
+; MDEP-NEXT: [[DOTPRE:%.*]] = load double, ptr [[SCEVGEP12_PHI_TRANS_INSERT]], align 8
+; MDEP-NEXT: [[DOTPRE1:%.*]] = load double, ptr [[G]], align 8
+; MDEP-NEXT: br label [[BB:%.*]]
+; MDEP: bb:
+; MDEP-NEXT: [[TMP2:%.*]] = phi double [ [[DOTPRE1]], [[BB_NPH]] ], [ [[TMP6:%.*]], [[BB]] ]
+; MDEP-NEXT: [[TMP3:%.*]] = phi double [ [[DOTPRE]], [[BB_NPH]] ], [ [[TMP4:%.*]], [[BB]] ]
+; MDEP-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
+; MDEP-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G]], i64 [[INDVAR]]
+; MDEP-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
+; MDEP-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MDEP-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
+; MDEP-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
+; MDEP-NEXT: [[TMP4]] = load double, ptr [[SCEVGEP10]], align 8
+; MDEP-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; MDEP-NEXT: [[TMP6]] = fadd double [[TMP5]], [[TMP2]]
+; MDEP-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
+; MDEP-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
+; MDEP-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MDEP: return:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test10(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TMP0:%.*]] = add i32 [[N:%.*]], -1
+; MSSA-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[TMP0]], 1
+; MSSA-NEXT: br i1 [[TMP1]], label [[BB_NPH:%.*]], label [[RETURN:%.*]]
+; MSSA: bb.nph:
+; MSSA-NEXT: [[TMP:%.*]] = sext i32 [[TMP0]] to i64
+; MSSA-NEXT: [[TMP8:%.*]] = add i64 [[TMP]], -1
+; MSSA-NEXT: br label [[BB:%.*]]
+; MSSA: bb:
+; MSSA-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH]] ], [ [[TMP11:%.*]], [[BB]] ]
+; MSSA-NEXT: [[SCEVGEP:%.*]] = getelementptr double, ptr [[G:%.*]], i64 [[INDVAR]]
+; MSSA-NEXT: [[TMP9:%.*]] = add i64 [[INDVAR]], 2
+; MSSA-NEXT: [[SCEVGEP10:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP9]]
+; MSSA-NEXT: [[TMP11]] = add i64 [[INDVAR]], 1
+; MSSA-NEXT: [[SCEVGEP12:%.*]] = getelementptr double, ptr [[G]], i64 [[TMP11]]
+; MSSA-NEXT: [[TMP2:%.*]] = load double, ptr [[SCEVGEP12]], align 8
+; MSSA-NEXT: [[TMP3:%.*]] = load double, ptr [[SCEVGEP10]], align 8
+; MSSA-NEXT: [[TMP4:%.*]] = fadd double [[TMP2]], [[TMP3]]
+; MSSA-NEXT: [[TMP5:%.*]] = load double, ptr [[SCEVGEP]], align 8
+; MSSA-NEXT: [[TMP6:%.*]] = fadd double [[TMP4]], [[TMP5]]
+; MSSA-NEXT: store double [[TMP6]], ptr [[SCEVGEP12]], align 8
+; MSSA-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[TMP11]], [[TMP8]]
+; MSSA-NEXT: br i1 [[EXITCOND]], label [[RETURN]], label [[BB]]
+; MSSA: return:
+; MSSA-NEXT: ret void
;
entry:
%0 = add i32 %N, -1
@@ -547,24 +760,40 @@ return:
; Test critical edge splitting.
define i32 @test11(ptr %p, i1 %C, i32 %N) {
-; CHECK-LABEL: @test11(
-; CHECK-NEXT: block1:
-; CHECK-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
-; CHECK: block2:
-; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1
-; CHECK-NEXT: br i1 [[COND]], label [[BLOCK2_BLOCK4_CRIT_EDGE:%.*]], label [[BLOCK5:%.*]]
-; CHECK: block2.block4_crit_edge:
-; CHECK-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
-; CHECK-NEXT: br label [[BLOCK4:%.*]]
-; CHECK: block3:
-; CHECK-NEXT: store i32 0, ptr [[P]], align 4
-; CHECK-NEXT: br label [[BLOCK4]]
-; CHECK: block4:
-; CHECK-NEXT: [[PRE:%.*]] = phi i32 [ [[PRE_PRE]], [[BLOCK2_BLOCK4_CRIT_EDGE]] ], [ 0, [[BLOCK3]] ]
-; CHECK-NEXT: br label [[BLOCK5]]
-; CHECK: block5:
-; CHECK-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ]
-; CHECK-NEXT: ret i32 [[RET]]
+; MDEP-LABEL: @test11(
+; MDEP-NEXT: block1:
+; MDEP-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MDEP: block2:
+; MDEP-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1
+; MDEP-NEXT: br i1 [[COND]], label [[BLOCK2_BLOCK4_CRIT_EDGE:%.*]], label [[BLOCK5:%.*]]
+; MDEP: block2.block4_crit_edge:
+; MDEP-NEXT: [[PRE_PRE:%.*]] = load i32, ptr [[P:%.*]], align 4
+; MDEP-NEXT: br label [[BLOCK4:%.*]]
+; MDEP: block3:
+; MDEP-NEXT: store i32 0, ptr [[P]], align 4
+; MDEP-NEXT: br label [[BLOCK4]]
+; MDEP: block4:
+; MDEP-NEXT: [[PRE:%.*]] = phi i32 [ [[PRE_PRE]], [[BLOCK2_BLOCK4_CRIT_EDGE]] ], [ 0, [[BLOCK3]] ]
+; MDEP-NEXT: br label [[BLOCK5]]
+; MDEP: block5:
+; MDEP-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ]
+; MDEP-NEXT: ret i32 [[RET]]
+;
+; MSSA-LABEL: @test11(
+; MSSA-NEXT: block1:
+; MSSA-NEXT: br i1 [[C:%.*]], label [[BLOCK2:%.*]], label [[BLOCK3:%.*]]
+; MSSA: block2:
+; MSSA-NEXT: [[COND:%.*]] = icmp sgt i32 [[N:%.*]], 1
+; MSSA-NEXT: br i1 [[COND]], label [[BLOCK4:%.*]], label [[BLOCK5:%.*]]
+; MSSA: block3:
+; MSSA-NEXT: store i32 0, ptr [[P:%.*]], align 4
+; MSSA-NEXT: br label [[BLOCK4]]
+; MSSA: block4:
+; MSSA-NEXT: [[PRE:%.*]] = load i32, ptr [[P]], align 4
+; MSSA-NEXT: br label [[BLOCK5]]
+; MSSA: block5:
+; MSSA-NEXT: [[RET:%.*]] = phi i32 [ 0, [[BLOCK2]] ], [ [[PRE]], [[BLOCK4]] ]
+; MSSA-NEXT: ret i32 [[RET]]
;
block1:
br i1 %C, label %block2, label %block3
@@ -726,17 +955,30 @@ follow_2:
; Since it is OK to speculate, PRE is allowed.
define i32 @test15(ptr noalias nocapture readonly dereferenceable(8) align 4 %x, ptr noalias nocapture %r, i32 %a) nofree nosync {
-; CHECK-LABEL: @test15(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
-; CHECK-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: call void @f()
-; CHECK-NEXT: ret i32 [[VV_PRE]]
+; MDEP-LABEL: @test15(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
+; MDEP-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4
+; MDEP-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: call void @f()
+; MDEP-NEXT: ret i32 [[VV_PRE]]
+;
+; MSSA-LABEL: @test15(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
+; MSSA-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[UU:%.*]] = load i32, ptr [[X:%.*]], align 4
+; MSSA-NEXT: store i32 [[UU]], ptr [[R:%.*]], align 4
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: call void @f()
+; MSSA-NEXT: [[VV:%.*]] = load i32, ptr [[X]], align 4
+; MSSA-NEXT: ret i32 [[VV]]
;
entry:
@@ -763,17 +1005,30 @@ if.end:
; Since it is OK to speculate, PRE is allowed.
define i32 @test16(ptr noalias nocapture readonly dereferenceable(8) align 4 %x, ptr noalias nocapture %r, i32 %a) nofree nosync {
-; CHECK-LABEL: @test16(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
-; CHECK-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4
-; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: call void @f()
-; CHECK-NEXT: ret i32 [[VV_PRE]]
+; MDEP-LABEL: @test16(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
+; MDEP-NEXT: [[VV_PRE:%.*]] = load i32, ptr [[X:%.*]], align 4
+; MDEP-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: store i32 [[VV_PRE]], ptr [[R:%.*]], align 4
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: call void @f()
+; MDEP-NEXT: ret i32 [[VV_PRE]]
+;
+; MSSA-LABEL: @test16(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A:%.*]], 0
+; MSSA-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[UU:%.*]] = load i32, ptr [[X:%.*]], align 4
+; MSSA-NEXT: store i32 [[UU]], ptr [[R:%.*]], align 4
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: call void @f()
+; MSSA-NEXT: [[VV:%.*]] = load i32, ptr [[X]], align 4
+; MSSA-NEXT: ret i32 [[VV]]
;
entry:
@@ -808,36 +1063,67 @@ declare i1 @bar()
; We can move all loads into predecessors.
define void @test17(ptr %p1, ptr %p2, ptr %p3, ptr %p4)
-; CHECK-LABEL: @test17(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
-; CHECK-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200
-; CHECK-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]]
-; CHECK: bb1:
-; CHECK-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100
-; CHECK-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]]
-; CHECK: bb2:
-; CHECK-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1
-; CHECK-NEXT: store i64 [[V2]], ptr [[P1]], align 8
-; CHECK-NEXT: br label [[BB3:%.*]]
-; CHECK: bb3:
-; CHECK-NEXT: [[V3:%.*]] = phi i64 [ [[V3_PRE:%.*]], [[BB200]] ], [ [[V3_PRE1:%.*]], [[BB100]] ], [ [[V2]], [[BB2]] ]
-; CHECK-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8
-; CHECK-NEXT: ret void
-; CHECK: bb100:
-; CHECK-NEXT: [[COND3:%.*]] = call i1 @foo()
-; CHECK-NEXT: [[V3_PRE1]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]]
-; CHECK: bb101:
-; CHECK-NEXT: store i64 [[V3_PRE1]], ptr [[P3:%.*]], align 8
-; CHECK-NEXT: ret void
-; CHECK: bb200:
-; CHECK-NEXT: [[COND4:%.*]] = call i1 @bar()
-; CHECK-NEXT: [[V3_PRE]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]]
-; CHECK: bb201:
-; CHECK-NEXT: store i64 [[V3_PRE]], ptr [[P4:%.*]], align 8
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test17(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
+; MDEP-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200
+; MDEP-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]]
+; MDEP: bb1:
+; MDEP-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100
+; MDEP-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]]
+; MDEP: bb2:
+; MDEP-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1
+; MDEP-NEXT: store i64 [[V2]], ptr [[P1]], align 8
+; MDEP-NEXT: br label [[BB3:%.*]]
+; MDEP: bb3:
+; MDEP-NEXT: [[V3:%.*]] = phi i64 [ [[V3_PRE:%.*]], [[BB200]] ], [ [[V3_PRE1:%.*]], [[BB100]] ], [ [[V2]], [[BB2]] ]
+; MDEP-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8
+; MDEP-NEXT: ret void
+; MDEP: bb100:
+; MDEP-NEXT: [[COND3:%.*]] = call i1 @foo()
+; MDEP-NEXT: [[V3_PRE1]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]]
+; MDEP: bb101:
+; MDEP-NEXT: store i64 [[V3_PRE1]], ptr [[P3:%.*]], align 8
+; MDEP-NEXT: ret void
+; MDEP: bb200:
+; MDEP-NEXT: [[COND4:%.*]] = call i1 @bar()
+; MDEP-NEXT: [[V3_PRE]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]]
+; MDEP: bb201:
+; MDEP-NEXT: store i64 [[V3_PRE]], ptr [[P4:%.*]], align 8
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test17(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
+; MSSA-NEXT: [[COND1:%.*]] = icmp sgt i64 [[V1]], 200
+; MSSA-NEXT: br i1 [[COND1]], label [[BB200:%.*]], label [[BB1:%.*]]
+; MSSA: bb1:
+; MSSA-NEXT: [[COND2:%.*]] = icmp sgt i64 [[V1]], 100
+; MSSA-NEXT: br i1 [[COND2]], label [[BB100:%.*]], label [[BB2:%.*]]
+; MSSA: bb2:
+; MSSA-NEXT: [[V2:%.*]] = add nsw i64 [[V1]], 1
+; MSSA-NEXT: store i64 [[V2]], ptr [[P1]], align 8
+; MSSA-NEXT: br label [[BB3:%.*]]
+; MSSA: bb3:
+; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V3]], ptr [[P2:%.*]], align 8
+; MSSA-NEXT: ret void
+; MSSA: bb100:
+; MSSA-NEXT: [[COND3:%.*]] = call i1 @foo()
+; MSSA-NEXT: br i1 [[COND3]], label [[BB3]], label [[BB101:%.*]]
+; MSSA: bb101:
+; MSSA-NEXT: [[V4:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V4]], ptr [[P3:%.*]], align 8
+; MSSA-NEXT: ret void
+; MSSA: bb200:
+; MSSA-NEXT: [[COND4:%.*]] = call i1 @bar()
+; MSSA-NEXT: br i1 [[COND4]], label [[BB3]], label [[BB201:%.*]]
+; MSSA: bb201:
+; MSSA-NEXT: [[V5:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V5]], ptr [[P4:%.*]], align 8
+; MSSA-NEXT: ret void
;
{
entry:
@@ -882,18 +1168,31 @@ bb201:
; So ValuesPerBlock[%if.then] should not be replaced when the load instruction
; is moved to %entry.
define void @test18(i1 %cond, ptr %p1, ptr %p2) {
-; CHECK-LABEL: @test18(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[V2_PRE:%.*]] = load i16, ptr [[P1:%.*]], align 2
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[DEC:%.*]] = add i16 [[V2_PRE]], -1
-; CHECK-NEXT: store i16 [[DEC]], ptr [[P1]], align 2
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: if.end:
-; CHECK-NEXT: [[V2:%.*]] = phi i16 [ [[DEC]], [[IF_THEN]] ], [ [[V2_PRE]], [[ENTRY:%.*]] ]
-; CHECK-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test18(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[V2_PRE:%.*]] = load i16, ptr [[P1:%.*]], align 2
+; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: [[DEC:%.*]] = add i16 [[V2_PRE]], -1
+; MDEP-NEXT: store i16 [[DEC]], ptr [[P1]], align 2
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: if.end:
+; MDEP-NEXT: [[V2:%.*]] = phi i16 [ [[DEC]], [[IF_THEN]] ], [ [[V2_PRE]], [[ENTRY:%.*]] ]
+; MDEP-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test18(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_END:%.*]], label [[IF_THEN:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[V1:%.*]] = load i16, ptr [[P1:%.*]], align 2
+; MSSA-NEXT: [[DEC:%.*]] = add i16 [[V1]], -1
+; MSSA-NEXT: store i16 [[DEC]], ptr [[P1]], align 2
+; MSSA-NEXT: br label [[IF_END]]
+; MSSA: if.end:
+; MSSA-NEXT: [[V2:%.*]] = load i16, ptr [[P1]], align 2
+; MSSA-NEXT: store i16 [[V2]], ptr [[P2:%.*]], align 2
+; MSSA-NEXT: ret void
;
entry:
br i1 %cond, label %if.end, label %if.then
@@ -912,32 +1211,56 @@ if.end:
; PRE of load instructions should not cross exception handling instructions.
define void @test19(i1 %cond, ptr %p1, ptr %p2)
-; CHECK-LABEL: @test19(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
-; CHECK: then:
-; CHECK-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8
-; CHECK-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1
-; CHECK-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8
-; CHECK-NEXT: br label [[END:%.*]]
-; CHECK: else:
-; CHECK-NEXT: invoke void @f()
-; CHECK-NEXT: to label [[ELSE_END_CRIT_EDGE:%.*]] unwind label [[LPAD:%.*]]
-; CHECK: else.end_crit_edge:
-; CHECK-NEXT: [[V1_PRE:%.*]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: br label [[END]]
-; CHECK: end:
-; CHECK-NEXT: [[V1:%.*]] = phi i64 [ [[V1_PRE]], [[ELSE_END_CRIT_EDGE]] ], [ [[ADD]], [[THEN]] ]
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[V1]], 100
-; CHECK-NEXT: store i64 [[AND]], ptr [[P2]], align 8
-; CHECK-NEXT: ret void
-; CHECK: lpad:
-; CHECK-NEXT: [[LP:%.*]] = landingpad { ptr, i32 }
-; CHECK-NEXT: cleanup
-; CHECK-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: [[OR:%.*]] = or i64 [[V3]], 200
-; CHECK-NEXT: store i64 [[OR]], ptr [[P1]], align 8
-; CHECK-NEXT: resume { ptr, i32 } [[LP]]
+; MDEP-LABEL: @test19(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; MDEP: then:
+; MDEP-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8
+; MDEP-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1
+; MDEP-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8
+; MDEP-NEXT: br label [[END:%.*]]
+; MDEP: else:
+; MDEP-NEXT: invoke void @f()
+; MDEP-NEXT: to label [[ELSE_END_CRIT_EDGE:%.*]] unwind label [[LPAD:%.*]]
+; MDEP: else.end_crit_edge:
+; MDEP-NEXT: [[V1_PRE:%.*]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: br label [[END]]
+; MDEP: end:
+; MDEP-NEXT: [[V1:%.*]] = phi i64 [ [[V1_PRE]], [[ELSE_END_CRIT_EDGE]] ], [ [[ADD]], [[THEN]] ]
+; MDEP-NEXT: [[AND:%.*]] = and i64 [[V1]], 100
+; MDEP-NEXT: store i64 [[AND]], ptr [[P2]], align 8
+; MDEP-NEXT: ret void
+; MDEP: lpad:
+; MDEP-NEXT: [[LP:%.*]] = landingpad { ptr, i32 }
+; MDEP-NEXT: cleanup
+; MDEP-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: [[OR:%.*]] = or i64 [[V3]], 200
+; MDEP-NEXT: store i64 [[OR]], ptr [[P1]], align 8
+; MDEP-NEXT: resume { ptr, i32 } [[LP]]
+;
+; MSSA-LABEL: @test19(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br i1 [[COND:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
+; MSSA: then:
+; MSSA-NEXT: [[V2:%.*]] = load i64, ptr [[P2:%.*]], align 8
+; MSSA-NEXT: [[ADD:%.*]] = add i64 [[V2]], 1
+; MSSA-NEXT: store i64 [[ADD]], ptr [[P1:%.*]], align 8
+; MSSA-NEXT: br label [[END:%.*]]
+; MSSA: else:
+; MSSA-NEXT: invoke void @f()
+; MSSA-NEXT: to label [[END]] unwind label [[LPAD:%.*]]
+; MSSA: end:
+; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: [[AND:%.*]] = and i64 [[V1]], 100
+; MSSA-NEXT: store i64 [[AND]], ptr [[P2]], align 8
+; MSSA-NEXT: ret void
+; MSSA: lpad:
+; MSSA-NEXT: [[LP:%.*]] = landingpad { ptr, i32 }
+; MSSA-NEXT: cleanup
+; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: [[OR:%.*]] = or i64 [[V3]], 200
+; MSSA-NEXT: store i64 [[OR]], ptr [[P1]], align 8
+; MSSA-NEXT: resume { ptr, i32 } [[LP]]
;
personality ptr @__CxxFrameHandler3 {
entry:
@@ -1050,29 +1373,50 @@ if.end:
; Call to function @maybethrow may cause exception, so the load of %v3 can't
; be hoisted to block %if.else.
define void @test22(i1 %cond, ptr %p1, ptr %p2) {
-; CHECK-LABEL: @test22(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
-; CHECK: if.then:
-; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
-; CHECK-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1
-; CHECK-NEXT: store i64 [[DEC]], ptr [[P1]], align 8
-; CHECK-NEXT: br label [[IF_END:%.*]]
-; CHECK: if.end:
-; CHECK-NEXT: [[V2:%.*]] = phi i64 [ [[V2_PRE:%.*]], [[IF_ELSE_IF_END_CRIT_EDGE:%.*]] ], [ [[DEC]], [[IF_THEN]] ]
-; CHECK-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8
-; CHECK-NEXT: ret void
-; CHECK: if.else:
-; CHECK-NEXT: [[COND2:%.*]] = call i1 @foo()
-; CHECK-NEXT: br i1 [[COND2]], label [[IF_ELSE_IF_END_CRIT_EDGE]], label [[EXIT:%.*]]
-; CHECK: if.else.if.end_crit_edge:
-; CHECK-NEXT: [[V2_PRE]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: br label [[IF_END]]
-; CHECK: exit:
-; CHECK-NEXT: [[_:%.*]] = call i1 @maybethrow()
-; CHECK-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
-; CHECK-NEXT: store i64 [[V3]], ptr [[P2]], align 8
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test22(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; MDEP: if.then:
+; MDEP-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
+; MDEP-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1
+; MDEP-NEXT: store i64 [[DEC]], ptr [[P1]], align 8
+; MDEP-NEXT: br label [[IF_END:%.*]]
+; MDEP: if.end:
+; MDEP-NEXT: [[V2:%.*]] = phi i64 [ [[V2_PRE:%.*]], [[IF_ELSE_IF_END_CRIT_EDGE:%.*]] ], [ [[DEC]], [[IF_THEN]] ]
+; MDEP-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8
+; MDEP-NEXT: ret void
+; MDEP: if.else:
+; MDEP-NEXT: [[COND2:%.*]] = call i1 @foo()
+; MDEP-NEXT: br i1 [[COND2]], label [[IF_ELSE_IF_END_CRIT_EDGE]], label [[EXIT:%.*]]
+; MDEP: if.else.if.end_crit_edge:
+; MDEP-NEXT: [[V2_PRE]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: br label [[IF_END]]
+; MDEP: exit:
+; MDEP-NEXT: [[_:%.*]] = call i1 @maybethrow()
+; MDEP-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MDEP-NEXT: store i64 [[V3]], ptr [[P2]], align 8
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test22(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: br i1 [[COND:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
+; MSSA: if.then:
+; MSSA-NEXT: [[V1:%.*]] = load i64, ptr [[P1:%.*]], align 8
+; MSSA-NEXT: [[DEC:%.*]] = add i64 [[V1]], -1
+; MSSA-NEXT: store i64 [[DEC]], ptr [[P1]], align 8
+; MSSA-NEXT: br label [[IF_END:%.*]]
+; MSSA: if.end:
+; MSSA-NEXT: [[V2:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V2]], ptr [[P2:%.*]], align 8
+; MSSA-NEXT: ret void
+; MSSA: if.else:
+; MSSA-NEXT: [[COND2:%.*]] = call i1 @foo()
+; MSSA-NEXT: br i1 [[COND2]], label [[IF_END]], label [[EXIT:%.*]]
+; MSSA: exit:
+; MSSA-NEXT: [[_:%.*]] = call i1 @maybethrow()
+; MSSA-NEXT: [[V3:%.*]] = load i64, ptr [[P1]], align 8
+; MSSA-NEXT: store i64 [[V3]], ptr [[P2]], align 8
+; MSSA-NEXT: ret void
;
entry:
br i1 %cond, label %if.then, label %if.else
@@ -1106,21 +1450,38 @@ declare void @maybethrow() readnone
; also be replaced by ValuesPerBlock(BB, NewLoad). So we'll not use the deleted
; OldLoad in later PHI instruction.
define void @test23(i1 %cond1, i1 %cond2) {
-; CHECK-LABEL: @test23(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[G:%.*]] = alloca i64, align 8
-; CHECK-NEXT: [[VAL1_PRE:%.*]] = load i64, ptr @B, align 8
-; CHECK-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]]
-; CHECK: then:
-; CHECK-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]]
-; CHECK: store:
-; CHECK-NEXT: store i64 [[VAL1_PRE]], ptr @B, align 8
-; CHECK-NEXT: br label [[WRONG]]
-; CHECK: wrong:
-; CHECK-NEXT: store i64 [[VAL1_PRE]], ptr [[G]], align 8
-; CHECK-NEXT: ret void
-; CHECK: exit:
-; CHECK-NEXT: ret void
+; MDEP-LABEL: @test23(
+; MDEP-NEXT: entry:
+; MDEP-NEXT: [[G:%.*]] = alloca i64, align 8
+; MDEP-NEXT: [[VAL1_PRE:%.*]] = load i64, ptr @B, align 8
+; MDEP-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]]
+; MDEP: then:
+; MDEP-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]]
+; MDEP: store:
+; MDEP-NEXT: store i64 [[VAL1_PRE]], ptr @B, align 8
+; MDEP-NEXT: br label [[WRONG]]
+; MDEP: wrong:
+; MDEP-NEXT: store i64 [[VAL1_PRE]], ptr [[G]], align 8
+; MDEP-NEXT: ret void
+; MDEP: exit:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: @test23(
+; MSSA-NEXT: entry:
+; MSSA-NEXT: [[G:%.*]] = alloca i64, align 8
+; MSSA-NEXT: br i1 [[COND2:%.*]], label [[THEN:%.*]], label [[WRONG:%.*]]
+; MSSA: then:
+; MSSA-NEXT: [[VAL2:%.*]] = load i64, ptr @B, align 8
+; MSSA-NEXT: br i1 [[COND1:%.*]], label [[STORE:%.*]], label [[EXIT:%.*]]
+; MSSA: store:
+; MSSA-NEXT: store i64 [[VAL2]], ptr @B, align 8
+; MSSA-NEXT: br label [[WRONG]]
+; MSSA: wrong:
+; MSSA-NEXT: [[VAL1:%.*]] = load i64, ptr @B, align 8
+; MSSA-NEXT: store i64 [[VAL1]], ptr [[G]], align 8
+; MSSA-NEXT: ret void
+; MSSA: exit:
+; MSSA-NEXT: ret void
;
entry:
%G = alloca i64, align 8
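;; Editor's note: a minimal, hand-written sketch (not part of this patch) of the
;; partial redundancy the pre-load.ll tests above exercise. A load that is fully
;; available along one predecessor path and missing along the other is what GVN's
;; load PRE targets:
define i32 @pre_load_sketch(ptr %p, i1 %c) {
entry:
  br i1 %c, label %then, label %join

then:                              ; the value at %p is available here (it is 0)
  store i32 0, ptr %p, align 4
  br label %join

join:                              ; before PRE: partially redundant load
  %v = load i32, ptr %p, align 4
  ret i32 %v
}
;; After load PRE (names follow GVN's usual convention, assumed here): the
;; critical %entry -> %join edge is split, a %v.pre load is inserted on it, and
;; %v becomes roughly: phi i32 [ 0, %then ], [ %v.pre, %entry.join_crit_edge ]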
diff --git a/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll b/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll
index e16c21e..4cd2e47 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-loop-load-new-pm.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s
+; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -aa-pipeline=basic-aa -enable-load-pre -enable-pre -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
declare void @side_effect()
declare i1 @side_effect_cond()
@@ -216,7 +217,7 @@ define i32 @test_load_on_exiting_cold_path_02(ptr %p) gc "statepoint-example" pe
; CHECK-NEXT: br label [[BACKEDGE]]
; CHECK: cold_path:
; CHECK-NEXT: invoke void @side_effect()
-; CHECK-NEXT: to label [[BACKEDGE]] unwind label [[COLD_EXIT:%.*]]
+; CHECK-NEXT: to label [[BACKEDGE]] unwind label [[COLD_EXIT:%.*]]
; CHECK: backedge:
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]]
; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
@@ -225,7 +226,7 @@ define i32 @test_load_on_exiting_cold_path_02(ptr %p) gc "statepoint-example" pe
; CHECK-NEXT: ret i32 [[X]]
; CHECK: cold_exit:
; CHECK-NEXT: [[LANDING_PAD:%.*]] = landingpad token
-; CHECK-NEXT: cleanup
+; CHECK-NEXT: cleanup
; CHECK-NEXT: ret i32 -1
;
entry:
@@ -447,7 +448,7 @@ define i32 @test_inner_loop(ptr %p, i1 %arg) {
; CHECK-NEXT: br label [[INNER_LOOP:%.*]]
; CHECK: inner_loop:
; CHECK-NEXT: call void @side_effect()
-; CHECK-NEXT: br i1 %arg, label [[INNER_LOOP]], label [[BACKEDGE]]
+; CHECK-NEXT: br i1 [[ARG:%.*]], label [[INNER_LOOP]], label [[BACKEDGE]]
; CHECK: backedge:
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[X]]
; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV_NEXT]], 1000
@@ -633,3 +634,6 @@ exit:
cold_exit:
ret i32 -1
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
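;; Editor's note: for reference, the RUN-line shape used throughout these files
;; (nothing beyond what the lines above already show). The default gvn pass
;; queries MemoryDependenceAnalysis; the memoryssa parameter selects the
;; MemorySSA-backed implementation. FileCheck prefixes split the assertions:
;; CHECK lines apply to both runs, MDEP and MSSA lines to one run each.
; RUN: opt -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
; RUN: opt -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA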
diff --git a/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll b/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll
index 2009c29..22c628b 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-no-cost-phi.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -passes=gvn -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -S | FileCheck %s --check-prefixes=CHECK,MSSA
; This testcase tests insertion of no-cost phis. That is,
; when the value is already available in every predecessor,
; and we just need to insert a phi node to merge the available values.
@@ -8,6 +10,22 @@
define i32 @mai(i32 %foo, i32 %a, i32 %b) {
+; CHECK-LABEL: define i32 @mai(
+; CHECK-SAME: i32 [[FOO:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[FOO]], 0
+; CHECK-NEXT: br i1 [[TMP1]], label %[[BB1:.*]], label %[[BB2:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw i32 [[A]], [[B]]
+; CHECK-NEXT: store i32 [[TMP2]], ptr @c, align 4
+; CHECK-NEXT: br label %[[MERGEBLOCK:.*]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: [[TMP3:%.*]] = add nsw i32 [[A]], [[B]]
+; CHECK-NEXT: store i32 [[TMP3]], ptr @d, align 4
+; CHECK-NEXT: br label %[[MERGEBLOCK]]
+; CHECK: [[MERGEBLOCK]]:
+; CHECK-NEXT: [[DOTPRE_PHI:%.*]] = phi i32 [ [[TMP3]], %[[BB2]] ], [ [[TMP2]], %[[BB1]] ]
+; CHECK-NEXT: ret i32 [[DOTPRE_PHI]]
+;
%1 = icmp ne i32 %foo, 0
br i1 %1, label %bb1, label %bb2
@@ -22,10 +40,11 @@ bb2:
br label %mergeblock
mergeblock:
-; CHECK: pre-phi = phi i32 [ %3, %bb2 ], [ %2, %bb1 ]
-; CHECK-NEXT: ret i32 %.pre-phi
%4 = add nsw i32 %a, %b
ret i32 %4
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
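;; Editor's note: a stripped-down version (hand-written, mirroring @mai above) of
;; the no-cost-phi situation this file tests. The value %a + %b is computed in
;; every predecessor of the merge block, so GVN needs no new computation, only a
;; phi:
define i32 @no_cost_phi_sketch(i1 %c, i32 %a, i32 %b) {
entry:
  br i1 %c, label %bb1, label %bb2

bb1:
  %s1 = add nsw i32 %a, %b
  br label %merge

bb2:
  %s2 = add nsw i32 %a, %b
  br label %merge

merge:
  ; fully redundant: both predecessors already computed %a + %b, so GVN replaces
  ; this add with %.pre-phi = phi i32 [ %s2, %bb2 ], [ %s1, %bb1 ]
  %s = add nsw i32 %a, %b
  ret i32 %s
}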
diff --git a/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll b/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll
index d17c459..32f149b 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-poison-add.ll
@@ -1,52 +1,77 @@
-; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
@H = common global i32 0
@G = common global i32 0
define i32 @test1(i1 %cond, i32 %v) nounwind {
-; CHECK-LABEL: @test1
+; CHECK-LABEL: define i32 @test1(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[BB1:.*]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V]], 42
+; CHECK-NEXT: store i32 [[ADD_1]], ptr @G, align 4
+; CHECK-NEXT: br label %[[RETURN:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[DOTPRE:%.*]] = add i32 [[V]], 42
+; CHECK-NEXT: br label %[[RETURN]]
+; CHECK: [[RETURN]]:
+; CHECK-NEXT: [[ADD_2_PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[BB1]] ], [ [[ADD_1]], %[[BB]] ]
+; CHECK-NEXT: store i32 [[ADD_2_PRE_PHI]], ptr @H, align 4
+; CHECK-NEXT: ret i32 0
+;
entry:
- br i1 %cond, label %bb, label %bb1
+ br i1 %cond, label %bb, label %bb1
bb:
- %add.1 = add nuw nsw i32 %v, 42
-; CHECK: %add.1 = add i32 %v, 42
- store i32 %add.1, ptr @G, align 4
- br label %return
+ %add.1 = add nuw nsw i32 %v, 42
+ store i32 %add.1, ptr @G, align 4
+ br label %return
bb1:
-; CHECK: %.pre = add i32 %v, 42
- br label %return
+ br label %return
return:
-; CHECK: %add.2.pre-phi = phi i32 [ %.pre, %bb1 ], [ %add.1, %bb ]
-; CHECK-NEXT: store i32 %add.2.pre-phi, ptr @H, align 4
-; CHECK-NEXT: ret i32 0
- %add.2 = add i32 %v, 42
- store i32 %add.2, ptr @H, align 4
- ret i32 0
+ %add.2 = add i32 %v, 42
+ store i32 %add.2, ptr @H, align 4
+ ret i32 0
}
define i32 @test2(i1 %cond, i32 %v) nounwind {
-; CHECK-LABEL: @test2
+; CHECK-LABEL: define i32 @test2(
+; CHECK-SAME: i1 [[COND:%.*]], i32 [[V:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 [[COND]], label %[[BB:.*]], label %[[BB1:.*]]
+; CHECK: [[BB]]:
+; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[V]], 42
+; CHECK-NEXT: store i32 [[ADD_1]], ptr @G, align 4
+; CHECK-NEXT: br label %[[RETURN:.*]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[DOTPRE:%.*]] = add nuw nsw i32 [[V]], 42
+; CHECK-NEXT: br label %[[RETURN]]
+; CHECK: [[RETURN]]:
+; CHECK-NEXT: [[ADD_2_PRE_PHI:%.*]] = phi i32 [ [[DOTPRE]], %[[BB1]] ], [ [[ADD_1]], %[[BB]] ]
+; CHECK-NEXT: store i32 [[ADD_2_PRE_PHI]], ptr @H, align 4
+; CHECK-NEXT: ret i32 0
+;
entry:
- br i1 %cond, label %bb, label %bb1
+ br i1 %cond, label %bb, label %bb1
bb:
- %add.1 = add i32 %v, 42
-; CHECK: %add.1 = add i32 %v, 42
- store i32 %add.1, ptr @G, align 4
- br label %return
+ %add.1 = add i32 %v, 42
+ store i32 %add.1, ptr @G, align 4
+ br label %return
bb1:
-; CHECK: %.pre = add nuw nsw i32 %v, 42
- br label %return
+ br label %return
return:
-; CHECK: %add.2.pre-phi = phi i32 [ %.pre, %bb1 ], [ %add.1, %bb ]
-; CHECK-NEXT: store i32 %add.2.pre-phi, ptr @H, align 4
-; CHECK-NEXT: ret i32 0
- %add.2 = add nuw nsw i32 %v, 42
- store i32 %add.2, ptr @H, align 4
- ret i32 0
+ %add.2 = add nuw nsw i32 %v, 42
+ store i32 %add.2, ptr @H, align 4
+ ret i32 0
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; MDEP: {{.*}}
+; MSSA: {{.*}}
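;; Editor's note: the two tests above differ only in where the nuw/nsw flags sit,
;; and the regenerated checks capture the rule. The inserted %.pre is a clone of
;; the instruction being PRE'd, so it keeps that instruction's flags, while an
;; existing add that now also stands in for a flag-less occurrence must drop its
;; flags so it cannot introduce poison. A compressed restatement (hand-written
;; comments, not from the patch):
;
; test1: %add.1 had nuw nsw, %add.2 had none. %add.1 now also represents
; %add.2, so its flags are dropped, and %.pre clones the flag-less %add.2:
;   %add.1 = add i32 %v, 42
;   %.pre  = add i32 %v, 42
;
; test2: %add.1 had no flags, %add.2 had nuw nsw. %.pre clones %add.2 and
; keeps them:
;   %add.1 = add i32 %v, 42
;   %.pre  = add nuw nsw i32 %v, 42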
diff --git a/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll b/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll
index 7342925..74bc6bc 100644
--- a/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll
+++ b/llvm/test/Transforms/GVN/PRE/pre-single-pred.ll
@@ -1,4 +1,6 @@
-; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=gvn -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt < %s -passes='gvn<memoryssa>' -enable-load-pre -S | FileCheck %s --check-prefixes=CHECK,MSSA
; RUN: opt < %s -passes="gvn<load-pre>" -enable-load-pre=false -S | FileCheck %s
; This testcase assumed we'll PRE the load into %for.cond, but we don't actually
; verify that doing so is safe. If there didn't _happen_ to be a load in
@@ -12,35 +14,85 @@
@p = external global i32
define i32 @f(i32 %n) nounwind {
+; MDEP-LABEL: define i32 @f(
+; MDEP-SAME: i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; MDEP-NEXT: [[ENTRY:.*]]:
+; MDEP-NEXT: br label %[[FOR_COND:.*]]
+; MDEP: [[FOR_COND]]:
+; MDEP-NEXT: [[I_0:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INDVAR_NEXT:%.*]], %[[FOR_INC:.*]] ]
+; MDEP-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[N]]
+; MDEP-NEXT: br i1 [[CMP]], label %[[FOR_BODY:.*]], label %[[FOR_COND_FOR_END_CRIT_EDGE:.*]]
+; MDEP: [[FOR_COND_FOR_END_CRIT_EDGE]]:
+; MDEP-NEXT: [[TMP9_PRE:%.*]] = load i32, ptr @p, align 4
+; MDEP-NEXT: br label %[[FOR_END:.*]]
+; MDEP: [[FOR_BODY]]:
+; MDEP-NEXT: [[TMP3:%.*]] = load i32, ptr @p, align 4
+; MDEP-NEXT: [[DEC:%.*]] = add i32 [[TMP3]], -1
+; MDEP-NEXT: store i32 [[DEC]], ptr @p, align 4
+; MDEP-NEXT: [[CMP6:%.*]] = icmp slt i32 [[DEC]], 0
+; MDEP-NEXT: br i1 [[CMP6]], label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_INC]]
+; MDEP: [[FOR_BODY_FOR_END_CRIT_EDGE]]:
+; MDEP-NEXT: br label %[[FOR_END]]
+; MDEP: [[FOR_INC]]:
+; MDEP-NEXT: [[INDVAR_NEXT]] = add i32 [[I_0]], 1
+; MDEP-NEXT: br label %[[FOR_COND]]
+; MDEP: [[FOR_END]]:
+; MDEP-NEXT: [[TMP9:%.*]] = phi i32 [ [[DEC]], %[[FOR_BODY_FOR_END_CRIT_EDGE]] ], [ [[TMP9_PRE]], %[[FOR_COND_FOR_END_CRIT_EDGE]] ]
+; MDEP-NEXT: ret i32 [[TMP9]]
+;
+; MSSA-LABEL: define i32 @f(
+; MSSA-SAME: i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; MSSA-NEXT: [[ENTRY:.*]]:
+; MSSA-NEXT: br label %[[FOR_COND:.*]]
+; MSSA: [[FOR_COND]]:
+; MSSA-NEXT: [[I_0:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INDVAR_NEXT:%.*]], %[[FOR_INC:.*]] ]
+; MSSA-NEXT: [[CMP:%.*]] = icmp slt i32 [[I_0]], [[N]]
+; MSSA-NEXT: br i1 [[CMP]], label %[[FOR_BODY:.*]], label %[[FOR_COND_FOR_END_CRIT_EDGE:.*]]
+; MSSA: [[FOR_COND_FOR_END_CRIT_EDGE]]:
+; MSSA-NEXT: br label %[[FOR_END:.*]]
+; MSSA: [[FOR_BODY]]:
+; MSSA-NEXT: [[TMP3:%.*]] = load i32, ptr @p, align 4
+; MSSA-NEXT: [[DEC:%.*]] = add i32 [[TMP3]], -1
+; MSSA-NEXT: store i32 [[DEC]], ptr @p, align 4
+; MSSA-NEXT: [[CMP6:%.*]] = icmp slt i32 [[DEC]], 0
+; MSSA-NEXT: br i1 [[CMP6]], label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_INC]]
+; MSSA: [[FOR_BODY_FOR_END_CRIT_EDGE]]:
+; MSSA-NEXT: br label %[[FOR_END]]
+; MSSA: [[FOR_INC]]:
+; MSSA-NEXT: [[INDVAR_NEXT]] = add i32 [[I_0]], 1
+; MSSA-NEXT: br label %[[FOR_COND]]
+; MSSA: [[FOR_END]]:
+; MSSA-NEXT: [[TMP9:%.*]] = load i32, ptr @p, align 4
+; MSSA-NEXT: ret i32 [[TMP9]]
+;
entry:
- br label %for.cond
+ br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %i.0 = phi i32 [ 0, %entry ], [ %indvar.next, %for.inc ] ; <i32> [#uses=2]
- %cmp = icmp slt i32 %i.0, %n ; <i1> [#uses=1]
- br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
+ %i.0 = phi i32 [ 0, %entry ], [ %indvar.next, %for.inc ] ; <i32> [#uses=2]
+ %cmp = icmp slt i32 %i.0, %n ; <i1> [#uses=1]
+ br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
for.cond.for.end_crit_edge: ; preds = %for.cond
- br label %for.end
+ br label %for.end
-; CHECK: for.body:
-; CHECK-NEXT: %tmp3 = load i32, ptr @p
for.body: ; preds = %for.cond
- %tmp3 = load i32, ptr @p ; <i32> [#uses=1]
- %dec = add i32 %tmp3, -1 ; <i32> [#uses=2]
- store i32 %dec, ptr @p
- %cmp6 = icmp slt i32 %dec, 0 ; <i1> [#uses=1]
- br i1 %cmp6, label %for.body.for.end_crit_edge, label %for.inc
+ %tmp3 = load i32, ptr @p ; <i32> [#uses=1]
+ %dec = add i32 %tmp3, -1 ; <i32> [#uses=2]
+ store i32 %dec, ptr @p
+ %cmp6 = icmp slt i32 %dec, 0 ; <i1> [#uses=1]
+ br i1 %cmp6, label %for.body.for.end_crit_edge, label %for.inc
-; CHECK: for.body.for.end_crit_edge:
for.body.for.end_crit_edge: ; preds = %for.body
- br label %for.end
+ br label %for.end
for.inc: ; preds = %for.body
- %indvar.next = add i32 %i.0, 1 ; <i32> [#uses=1]
- br label %for.cond
+ %indvar.next = add i32 %i.0, 1 ; <i32> [#uses=1]
+ br label %for.cond
for.end: ; preds = %for.body.for.end_crit_edge, %for.cond.for.end_crit_edge
- %tmp9 = load i32, ptr @p ; <i32> [#uses=1]
- ret i32 %tmp9
+ %tmp9 = load i32, ptr @p ; <i32> [#uses=1]
+ ret i32 %tmp9
}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
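;; Editor's note: the caveat in the comment at the top of this file is the
;; general precondition for load PRE: inserting a load on a path where it did not
;; previously execute is only sound if the load can be speculated. A sketch
;; (hand-written, modeled on @test15/@test16 in pre-load.ll above, minus the
;; intervening call) of the attribute combination those tests rely on:
define i32 @speculate_sketch(ptr noalias dereferenceable(4) align 4 %x, ptr noalias %r, i1 %c) nofree nosync {
entry:
  br i1 %c, label %then, label %end

then:
  %u = load i32, ptr %x, align 4
  store i32 %u, ptr %r, align 4
  br label %end

end:
  %v = load i32, ptr %x, align 4
  ret i32 %v
}
;; With %x dereferenceable and suitably aligned, and the function nofree nosync,
;; the load stays valid on every path, so MemDep-based GVN may hoist a single
;; load of %x into %entry and reuse it on both paths, as the MDEP checks for
;; @test15 and @test16 show.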
diff --git a/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll b/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll
index 3df63be..abbb17f 100644
--- a/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll
+++ b/llvm/test/Transforms/GVN/PRE/preserve-tbaa.ll
@@ -1,13 +1,45 @@
-; RUN: opt -passes=gvn -S < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=gvn -S < %s | FileCheck %s --check-prefixes=CHECK,MDEP
+; RUN: opt -passes='gvn<memoryssa>' -S < %s | FileCheck %s --check-prefixes=CHECK,MSSA
target datalayout = "e-p:64:64:64"
; GVN should preserve the TBAA tag on loads when doing PRE.
-; CHECK-LABEL: @test(
-; CHECK: %tmp33.pre = load i16, ptr %P, align 2, !tbaa !0
-; CHECK: br label %for.body
define void @test(ptr %P, ptr %Q, i1 %arg) nounwind {
+; MDEP-LABEL: define void @test(
+; MDEP-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; MDEP-NEXT: [[ENTRY:.*:]]
+; MDEP-NEXT: br i1 [[ARG]], label %[[BB_NPH:.*]], label %[[FOR_END:.*]]
+; MDEP: [[BB_NPH]]:
+; MDEP-NEXT: [[TMP33_PRE:%.*]] = load i16, ptr [[P]], align 2, !tbaa [[TBAA0:![0-9]+]]
+; MDEP-NEXT: br label %[[FOR_BODY:.*]]
+; MDEP: [[FOR_BODY]]:
+; MDEP-NEXT: [[TMP33:%.*]] = phi i16 [ 0, %[[FOR_BODY]] ], [ [[TMP33_PRE]], %[[BB_NPH]] ]
+; MDEP-NEXT: store i16 [[TMP33]], ptr [[Q]], align 2
+; MDEP-NEXT: store i16 0, ptr [[P]], align 2, !tbaa [[TBAA0]]
+; MDEP-NEXT: br i1 false, label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_BODY]]
+; MDEP: [[FOR_BODY_FOR_END_CRIT_EDGE]]:
+; MDEP-NEXT: br label %[[FOR_END]]
+; MDEP: [[FOR_END]]:
+; MDEP-NEXT: ret void
+;
+; MSSA-LABEL: define void @test(
+; MSSA-SAME: ptr [[P:%.*]], ptr [[Q:%.*]], i1 [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; MSSA-NEXT: [[ENTRY:.*:]]
+; MSSA-NEXT: br i1 [[ARG]], label %[[BB_NPH:.*]], label %[[FOR_END:.*]]
+; MSSA: [[BB_NPH]]:
+; MSSA-NEXT: br label %[[FOR_BODY:.*]]
+; MSSA: [[FOR_BODY]]:
+; MSSA-NEXT: [[TMP33:%.*]] = load i16, ptr [[P]], align 2, !tbaa [[TBAA0:![0-9]+]]
+; MSSA-NEXT: store i16 [[TMP33]], ptr [[Q]], align 2
+; MSSA-NEXT: store i16 0, ptr [[P]], align 2, !tbaa [[TBAA0]]
+; MSSA-NEXT: br i1 false, label %[[FOR_BODY_FOR_END_CRIT_EDGE:.*]], label %[[FOR_BODY]]
+; MSSA: [[FOR_BODY_FOR_END_CRIT_EDGE]]:
+; MSSA-NEXT: br label %[[FOR_END]]
+; MSSA: [[FOR_END]]:
+; MSSA-NEXT: ret void
+;
entry:
br i1 %arg, label %bb.nph, label %for.end
@@ -29,3 +61,16 @@ for.end: ; preds = %for.body, %entry
!1 = !{!"omnipotent char", !2}
!2 = !{!"Simple C/C++ TBAA"}
!3 = !{!"short", !1}
+;.
+; MDEP: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
+; MDEP: [[META1]] = !{!"short", [[META2:![0-9]+]]}
+; MDEP: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]}
+; MDEP: [[META3]] = !{!"Simple C/C++ TBAA"}
+;.
+; MSSA: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0}
+; MSSA: [[META1]] = !{!"short", [[META2:![0-9]+]]}
+; MSSA: [[META2]] = !{!"omnipotent char", [[META3:![0-9]+]]}
+; MSSA: [[META3]] = !{!"Simple C/C++ TBAA"}
+;.
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
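;; Editor's note: what the MDEP run above is guarding is that metadata survives
;; the transform. A minimal sketch (metadata numbering assumed) of a PRE'd load
;; carrying the TBAA tag of the load it replaces:
;   %tmp33.pre = load i16, ptr %P, align 2, !tbaa !0
; where:
;   !0 = !{!1, !1, i64 0}
;   !1 = !{!"short", !2}
;   !2 = !{!"omnipotent char", !3}
;   !3 = !{!"Simple C/C++ TBAA"}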
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/dot.ll b/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/dot.ll
deleted file mode 100644
index b537b7b..0000000
--- a/llvm/test/Transforms/InstSimplify/ConstProp/WebAssembly/dot.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-
-; RUN: opt -passes=instsimplify -S < %s | FileCheck %s
-
-; Test that intrinsics wasm dot call are constant folded
-
-target triple = "wasm32-unknown-unknown"
-
-
-define <4 x i32> @dot_zero() {
-; CHECK-LABEL: define <4 x i32> @dot_zero() {
-; CHECK-NEXT: ret <4 x i32> zeroinitializer
-;
- %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> zeroinitializer, <8 x i16> zeroinitializer)
- ret <4 x i32> %res
-}
-
-; a = 1 2 3 4 5 6 7 8
-; b = 1 2 3 4 5 6 7 8
-; k1|k2 = a * b = 1 4 9 16 25 36 49 64
-; k1 + k2 = (1+4) | (9 + 16) | (25 + 36) | (49 + 64)
-; result = 5 | 25 | 61 | 113
-define <4 x i32> @dot_nonzero() {
-; CHECK-LABEL: define <4 x i32> @dot_nonzero() {
-; CHECK-NEXT: ret <4 x i32> <i32 5, i32 25, i32 61, i32 113>
-;
- %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
- ret <4 x i32> %res
-}
-
-define <4 x i32> @dot_doubly_negative() {
-; CHECK-LABEL: define <4 x i32> @dot_doubly_negative() {
-; CHECK-NEXT: ret <4 x i32> splat (i32 2)
-;
- %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>, <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>)
- ret <4 x i32> %res
-}
-
-; Tests that i16 max signed values fit in i32
-define <4 x i32> @dot_follow_modulo_spec_1() {
-; CHECK-LABEL: define <4 x i32> @dot_follow_modulo_spec_1() {
-; CHECK-NEXT: ret <4 x i32> <i32 2147352578, i32 0, i32 0, i32 0>
-;
- %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> <i16 32767, i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16> <i16 32767, i16 32767, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
- ret <4 x i32> %res
-}
-
-; Tests that i16 min signed values fit in i32
-define <4 x i32> @dot_follow_modulo_spec_2() {
-; CHECK-LABEL: define <4 x i32> @dot_follow_modulo_spec_2() {
-; CHECK-NEXT: ret <4 x i32> <i32 -2147483648, i32 0, i32 0, i32 0>
-;
- %res = tail call <4 x i32> @llvm.wasm.dot(<8 x i16> <i16 -32768, i16 -32768, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, <8 x i16> <i16 -32768, i16 -32768, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>)
- ret <4 x i32> %res
-}
-
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
index 795de3d..a8d9a0c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
@@ -45,8 +45,8 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[DST]], [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]]
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8
@@ -128,8 +128,8 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[DST]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[P_OUT_TAIL_09:%.*]] = phi ptr [ [[DST]], [[SCALAR_PH]] ], [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[TMP19:%.*]] = shl nuw nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT: [[SHR3:%.*]] = lshr i64 [[VAL]], [[TMP19]]
; CHECK-NEXT: [[CONV4:%.*]] = trunc i64 [[SHR3]] to i8
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index 0232d88..4b895ae 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -459,7 +459,7 @@ define void @latch_branch_cost(ptr %dst) {
; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; PRED-NEXT: store i8 0, ptr [[GEP]], align 1
; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
@@ -738,8 +738,8 @@ define void @multiple_exit_conditions(ptr %src, ptr noalias %dst) #1 {
; PRED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[DST]], %[[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[L:%.*]] = load i16, ptr [[SRC]], align 2
; PRED-NEXT: [[O:%.*]] = or i16 [[L]], 1
; PRED-NEXT: [[CONV:%.*]] = uitofp i16 [[O]] to double
@@ -865,7 +865,7 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; DEFAULT-NEXT: br label %[[LOOP:.*]]
; DEFAULT: [[LOOP]]:
-; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; DEFAULT-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i8
; DEFAULT-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; DEFAULT-NEXT: store i8 [[IV_TRUNC]], ptr [[GEP]], align 1
@@ -967,7 +967,7 @@ define void @low_trip_count_fold_tail_scalarized_store(ptr %dst) {
; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i8
; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; PRED-NEXT: store i8 [[IV_TRUNC]], ptr [[GEP]], align 1
@@ -1554,7 +1554,7 @@ define void @redundant_branch_and_tail_folding(ptr %dst, i1 %c) {
; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP_HEADER:.*]]
; PRED: [[LOOP_HEADER]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; PRED-NEXT: br i1 [[C]], label %[[LOOP_LATCH]], label %[[THEN:.*]]
; PRED: [[THEN]]:
; PRED-NEXT: br label %[[LOOP_LATCH]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
index fff99f1..41a624b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/first-order-recurrence-fold-tail.ll
@@ -75,8 +75,8 @@ define i32 @test_phi_iterator_invalidation(ptr %A, ptr noalias %B) {
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i16 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[FOR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[SEXT:%.*]] = sext i16 [[SCALAR_RECUR]] to i32
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr i32, ptr [[A]], i64 [[IV_NEXT]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
index 1471896..cc36cdb 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/optsize_minsize.ll
@@ -397,7 +397,7 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8
; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]]
; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1
@@ -546,7 +546,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; DEFAULT-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; DEFAULT-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
@@ -621,7 +621,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; OPTSIZE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; OPTSIZE-NEXT: br label %[[FOR_BODY:.*]]
; OPTSIZE: [[FOR_BODY]]:
-; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; OPTSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; OPTSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
; OPTSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
; OPTSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
@@ -696,7 +696,7 @@ define void @sve_tail_predicate_without_minsize(ptr %p, i8 %a, i8 %b, i8 %c, i32
; MINSIZE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; MINSIZE-NEXT: br label %[[FOR_BODY:.*]]
; MINSIZE: [[FOR_BODY]]:
-; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; MINSIZE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; MINSIZE-NEXT: [[TMP26:%.*]] = trunc nuw nsw i64 [[IV]] to i8
; MINSIZE-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP26]]
; MINSIZE-NEXT: [[SHR:%.*]] = lshr i8 [[TMP26]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll
index 8495dee..b4df63d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr151664-cost-hoisted-vector-scalable.ll
@@ -1,47 +1,28 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph" --version 5
-; REQUIRES: asserts
-; RUN: opt -passes=loop-vectorize -mtriple=aarch64 -mattr=+sve -S \
-; RUN: -debug-only=loop-vectorize %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -mtriple=aarch64 -mattr=+sve -S %s | FileCheck %s
-; FIXME: Hoisted vector code should be costed with scalable cost.
-; In this example, `<vscale x 4 x float> @llvm.minimumnum` has an invalid cost,
-; and hence should not be produced by LoopVectorize.
-
-; CHECK: LV: Found an estimated cost of Invalid for VF vscale x 4 For instruction: %res = tail call float @llvm.minimumnum.f32(float %arg, float 0.000000e+00)
define void @cost_hoisted_vector_code(ptr %p, float %arg) {
; CHECK-LABEL: define void @cost_hoisted_vector_code(
; CHECK-SAME: ptr [[P:%.*]], float [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 8
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 -1, [[TMP3]]
-; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 -1, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[ARG]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[BROADCAST_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = add i64 1, [[N_VEC]]
-; CHECK-NEXT: [[TMP7:%.*]] = call <vscale x 4 x float> @llvm.minimumnum.nxv4f32(<vscale x 4 x float> [[BROADCAST_SPLAT]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[ARG]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> [[BROADCAST_SPLAT]], <4 x float> zeroinitializer)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = add i64 1, [[INDEX1]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[P]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr float, ptr [[TMP8]], i64 [[TMP10]]
-; CHECK-NEXT: store <vscale x 4 x float> [[TMP7]], ptr [[TMP8]], align 4
-; CHECK-NEXT: store <vscale x 4 x float> [[TMP7]], ptr [[TMP11]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP5]]
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[TMP8]], i32 4
+; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP8]], align 4
+; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[TMP2]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], -8
+; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 -1, [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
index d9a3a71..830e7da 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr73894.ll
@@ -59,8 +59,8 @@ define i32 @pr70988(ptr %src, i32 %n) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[INDUC:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDUC_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[MAX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[TMP24:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[INDUC]]
; CHECK-NEXT: [[TMP22:%.*]] = load ptr, ptr [[GEP]], align 8
; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
index 08d35f7..381d2e1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/reduction-recurrence-costs-sve.ll
@@ -256,10 +256,10 @@ define i32 @chained_recurrences(i32 %x, i64 %y, ptr %src.1, i32 %z, ptr %src.2)
; PRED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT8]], %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ]
-; PRED-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[TMP45:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP53:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[SCALAR_RECUR10:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[TMP45]], %[[LOOP]] ]
+; PRED-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[SUM_RED:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[RED_2:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[TMP52:%.*]] = add i64 [[Y]], 1
; PRED-NEXT: [[GEP_1:%.*]] = getelementptr i32, ptr [[SRC_1]], i64 [[TMP52]]
; PRED-NEXT: [[TMP53]] = load i32, ptr [[GEP_1]], align 4
@@ -491,8 +491,8 @@ define i16 @reduce_udiv(ptr %src, i16 %x, i64 %N) #0 {
; PRED-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; PRED-NEXT: br label %[[LOOP:.*]]
; PRED: [[LOOP]]:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; PRED-NEXT: [[RED:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; PRED-NEXT: [[RED:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
; PRED-NEXT: [[GEP:%.*]] = getelementptr i16, ptr [[SRC]], i64 [[IV]]
; PRED-NEXT: [[L:%.*]] = load i16, ptr [[GEP]], align 2
; PRED-NEXT: [[DIV:%.*]] = udiv i16 [[L]], [[X]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
index a60d35d..0cad053 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-strict-fadd.ll
@@ -159,8 +159,8 @@ define float @fadd_strict(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP15]], [[SUM_07]]
@@ -420,8 +420,8 @@ define float @fadd_strict_unroll(ptr noalias nocapture readonly %a, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP45:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ADD]] = fadd float [[TMP45]], [[SUM_07]]
@@ -673,9 +673,9 @@ define void @fadd_strict_interleave(ptr noalias nocapture readonly %a, ptr noali
; CHECK-ORDERED-TF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[ADD_PHI1:%.*]] = phi float [ [[A2]], [[SCALAR_PH]] ], [ [[ADD2:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[ADD_PHI2:%.*]] = phi float [ [[A1]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDXB1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDXB1]], align 4
; CHECK-ORDERED-TF-NEXT: [[ADD1]] = fadd float [[TMP22]], [[ADD_PHI2]]
@@ -918,8 +918,8 @@ define float @fadd_of_sum(ptr noalias nocapture readonly %a, ptr noalias nocaptu
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-ORDERED-TF-NEXT: [[RES_014:%.*]] = phi float [ [[RDX:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1148,8 +1148,8 @@ define float @fadd_conditional(ptr noalias nocapture readonly %a, ptr noalias no
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 1.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-ORDERED-TF-NEXT: [[RES:%.*]] = phi float [ 1.000000e+00, [[SCALAR_PH]] ], [ [[FADD:%.*]], [[FOR_INC]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[TOBOOL:%.*]] = fcmp une float [[TMP18]], 0.000000e+00
@@ -1623,8 +1623,8 @@ define float @fmuladd_strict(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1945,8 +1945,8 @@ define float @fmuladd_strict_fmf(ptr %a, ptr %b, i64 %n) #0 {
; CHECK-ORDERED-TF-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ORDERED-TF-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-ORDERED-TF: for.body:
-; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-ORDERED-TF-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; CHECK-ORDERED-TF-NEXT: [[TMP59:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-ORDERED-TF-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
index 51efbe9..d32b898 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/store-costs-sve.ll
@@ -114,7 +114,7 @@ define void @cost_store_i8(ptr %dst) #0 {
; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; PRED-NEXT: br label [[LOOP:%.*]]
; PRED: loop:
-; PRED-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; PRED-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; PRED-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; PRED-NEXT: store i8 0, ptr [[GEP]], align 1
; PRED-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll
index 20bc0af..76a7536 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-fixed-width-inorder-core.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a510 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA510
; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a520 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA520
+; RUN: opt < %s -mtriple=aarch64-none-elf -mcpu=cortex-a320 -mattr=+sve -passes=loop-vectorize -S | FileCheck %s --check-prefix=CHECK-CA320
define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) {
; CHECK-CA510-LABEL: define void @sve_add(
@@ -131,6 +132,70 @@ define void @sve_add(ptr %dst, ptr %a, ptr %b, i64 %n) {
; CHECK-CA520: [[FOR_COND_CLEANUP]]:
; CHECK-CA520-NEXT: ret void
;
+; CHECK-CA320-LABEL: define void @sve_add(
+; CHECK-CA320-SAME: ptr [[DST:%.*]], ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-CA320-NEXT: [[ENTRY:.*:]]
+; CHECK-CA320-NEXT: [[B3:%.*]] = ptrtoint ptr [[B]] to i64
+; CHECK-CA320-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-CA320-NEXT: [[DST1:%.*]] = ptrtoint ptr [[DST]] to i64
+; CHECK-CA320-NEXT: [[CMP9_NOT:%.*]] = icmp eq i64 [[N]], 0
+; CHECK-CA320-NEXT: br i1 [[CMP9_NOT]], label %[[FOR_COND_CLEANUP:.*]], label %[[FOR_BODY_PREHEADER:.*]]
+; CHECK-CA320: [[FOR_BODY_PREHEADER]]:
+; CHECK-CA320-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
+; CHECK-CA320-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; CHECK-CA320: [[VECTOR_MEMCHECK]]:
+; CHECK-CA320-NEXT: [[TMP0:%.*]] = sub i64 [[DST1]], [[A2]]
+; CHECK-CA320-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32
+; CHECK-CA320-NEXT: [[TMP1:%.*]] = sub i64 [[DST1]], [[B3]]
+; CHECK-CA320-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP1]], 32
+; CHECK-CA320-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; CHECK-CA320-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; CHECK-CA320: [[VECTOR_PH]]:
+; CHECK-CA320-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 8
+; CHECK-CA320-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-CA320-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK-CA320: [[VECTOR_BODY]]:
+; CHECK-CA320-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-CA320-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDEX]]
+; CHECK-CA320-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[TMP2]], i32 4
+; CHECK-CA320-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4
+; CHECK-CA320-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP3]], align 4
+; CHECK-CA320-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDEX]]
+; CHECK-CA320-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[TMP4]], i32 4
+; CHECK-CA320-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP4]], align 4
+; CHECK-CA320-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP5]], align 4
+; CHECK-CA320-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD]]
+; CHECK-CA320-NEXT: [[TMP7:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD5]]
+; CHECK-CA320-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDEX]]
+; CHECK-CA320-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw float, ptr [[TMP8]], i32 4
+; CHECK-CA320-NEXT: store <4 x float> [[TMP6]], ptr [[TMP8]], align 4
+; CHECK-CA320-NEXT: store <4 x float> [[TMP7]], ptr [[TMP9]], align 4
+; CHECK-CA320-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-CA320-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-CA320-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-CA320: [[MIDDLE_BLOCK]]:
+; CHECK-CA320-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-CA320-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; CHECK-CA320: [[SCALAR_PH]]:
+; CHECK-CA320-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[FOR_BODY_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
+; CHECK-CA320-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK-CA320: [[FOR_BODY]]:
+; CHECK-CA320-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-CA320-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-CA320-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; CHECK-CA320-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-CA320-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-CA320-NEXT: [[ADD:%.*]] = fadd fast float [[TMP12]], [[TMP11]]
+; CHECK-CA320-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[DST]], i64 [[INDVARS_IV]]
+; CHECK-CA320-NEXT: store float [[ADD]], ptr [[ARRAYIDX4]], align 4
+; CHECK-CA320-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-CA320-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-CA320-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP_LOOPEXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-CA320: [[FOR_COND_CLEANUP_LOOPEXIT]]:
+; CHECK-CA320-NEXT: br label %[[FOR_COND_CLEANUP]]
+; CHECK-CA320: [[FOR_COND_CLEANUP]]:
+; CHECK-CA320-NEXT: ret void
+;
entry:
%cmp9.not = icmp eq i64 %n, 0
br i1 %cmp9.not, label %for.cond.cleanup, label %for.body
@@ -160,3 +225,8 @@ for.cond.cleanup: ; preds = %for.cond.cleanup.lo
; CHECK-CA520: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK-CA520: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.
+; CHECK-CA320: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK-CA320: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK-CA320: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK-CA320: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
index ce7b78e..2b01018 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-low-trip-count.ll
@@ -1,81 +1,100 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph\:" --version 5
; RUN: opt -passes=loop-vectorize -S < %s | FileCheck %s
target triple = "aarch64-unknown-linux-gnu"
define void @trip7_i64(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
-; CHECK-LABEL: @trip7_i64(
-; CHECK: = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: = mul nuw i64
-; CHECK: [[VSCALE:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[VF:%.*]] = mul nuw i64 [[VSCALE]], 2
-; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ]
-; CHECK: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ {{%.*}}, %vector.ph ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %vector.body ]
-; CHECK: {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
-; CHECK: {{%.*}} = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
-; CHECK: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> {{%.*}}, ptr {{%.*}}, i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[VF]]
+; CHECK-LABEL: define void @trip7_i64(
+; CHECK-SAME: ptr noalias noundef captures(none) [[DST:%.*]], ptr noalias noundef readonly captures(none) [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 7, [[TMP2]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
+; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 7)
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[VECTOR_PH]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
+; CHECK-NEXT: [[TMP6:%.*]] = shl nsw <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], splat (i64 1)
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
+; CHECK-NEXT: [[TMP8:%.*]] = add nsw <vscale x 2 x i64> [[WIDE_MASKED_LOAD1]], [[TMP6]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP8]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP4]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 7)
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_NOT:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
-; CHECK-NEXT: [[COND:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NOT]], i32 0
-; CHECK-NEXT: br i1 [[COND]], label %middle.block, label %vector.body
+; CHECK-NEXT: [[TMP9:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], splat (i1 true)
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <vscale x 2 x i1> [[TMP9]], i32 0
+; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br [[EXIT:label %.*]]
+; CHECK: [[SCALAR_PH]]:
;
entry:
- br label %for.body
+ br label %loop
-for.body: ; preds = %entry, %for.body
- %i.06 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i64, ptr %src, i64 %i.06
- %0 = load i64, ptr %arrayidx, align 8
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr inbounds i64, ptr %src, i64 %iv
+ %0 = load i64, ptr %gep.src, align 8
%mul = shl nsw i64 %0, 1
- %arrayidx1 = getelementptr inbounds i64, ptr %dst, i64 %i.06
- %1 = load i64, ptr %arrayidx1, align 8
+ %gep.dst = getelementptr inbounds i64, ptr %dst, i64 %iv
+ %1 = load i64, ptr %gep.dst, align 8
%add = add nsw i64 %1, %mul
- store i64 %add, ptr %arrayidx1, align 8
- %inc = add nuw nsw i64 %i.06, 1
- %exitcond.not = icmp eq i64 %inc, 7
- br i1 %exitcond.not, label %for.end, label %for.body
+ store i64 %add, ptr %gep.dst, align 8
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 7
+ br i1 %ec, label %exit, label %loop
-for.end: ; preds = %for.body
+exit:
ret void
}
define void @trip5_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture noundef readonly %src) #0 {
-; CHECK-LABEL: @trip5_i8(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-LABEL: define void @trip5_i8(
+; CHECK-SAME: ptr noalias noundef captures(none) [[DST:%.*]], ptr noalias noundef readonly captures(none) [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP0]], 1
-; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[I_08]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[GEP_DST]], align 1
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[MUL]], [[TMP1]]
-; CHECK-NEXT: store i8 [[ADD]], ptr [[ARRAYIDX1]], align 1
-; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_08]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], 5
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
-; CHECK: for.end:
+; CHECK-NEXT: store i8 [[ADD]], ptr [[GEP_DST]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 5
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
entry:
- br label %for.body
+ br label %loop
-for.body: ; preds = %entry, %for.body
- %i.08 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
- %arrayidx = getelementptr inbounds i8, ptr %src, i64 %i.08
- %0 = load i8, ptr %arrayidx, align 1
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr inbounds i8, ptr %src, i64 %iv
+ %0 = load i8, ptr %gep.src, align 1
%mul = shl i8 %0, 1
- %arrayidx1 = getelementptr inbounds i8, ptr %dst, i64 %i.08
- %1 = load i8, ptr %arrayidx1, align 1
+ %gep.dst = getelementptr inbounds i8, ptr %dst, i64 %iv
+ %1 = load i8, ptr %gep.dst, align 1
%add = add i8 %mul, %1
- store i8 %add, ptr %arrayidx1, align 1
- %inc = add nuw nsw i64 %i.08, 1
- %exitcond.not = icmp eq i64 %inc, 5
- br i1 %exitcond.not, label %for.end, label %for.body
+ store i8 %add, ptr %gep.dst, align 1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 5
+ br i1 %ec, label %exit, label %loop
-for.end: ; preds = %for.body
+exit:
ret void
}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
index f4982e6..d6f8b8e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding-reductions.ll
@@ -48,8 +48,8 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]]
@@ -101,8 +101,8 @@ define i32 @add_reduction_i32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-IN-LOOP: while.body:
-; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = add i32 [[RED]], [[VAL]]
@@ -171,8 +171,8 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]]
; CHECK-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]]
@@ -223,8 +223,8 @@ define float @add_reduction_f32(ptr %ptr, i64 %n) #0 {
; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-IN-LOOP-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-IN-LOOP: while.body:
-; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-IN-LOOP-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-IN-LOOP-NEXT: [[RED:%.*]] = phi float [ [[RED_NEXT:%.*]], [[WHILE_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-IN-LOOP-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[PTR]], i64 [[INDEX]]
; CHECK-IN-LOOP-NEXT: [[VAL:%.*]] = load float, ptr [[GEP]], align 4
; CHECK-IN-LOOP-NEXT: [[RED_NEXT]] = fadd float [[RED]], [[VAL]]
@@ -298,8 +298,8 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 7, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
; CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP26]], 5
@@ -362,8 +362,8 @@ define i32 @cond_xor_reduction(ptr noalias %a, ptr noalias %cond, i64 %N) #0 {
; CHECK-IN-LOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 7, [[ENTRY]] ]
; CHECK-IN-LOOP-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-IN-LOOP: for.body:
-; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
+; CHECK-IN-LOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-IN-LOOP-NEXT: [[RDX:%.*]] = phi i32 [ 7, [[SCALAR_PH]] ], [ [[RES:%.*]], [[FOR_INC]] ]
; CHECK-IN-LOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[COND]], i64 [[IV]]
; CHECK-IN-LOOP-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-IN-LOOP-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP24]], 5
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
index 9929f35..5c6328e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
@@ -35,7 +35,6 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP15:%.*]] = mul <vscale x 2 x i64> [[TMP13]], splat (i64 1)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP15]]
@@ -48,6 +47,7 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
; CHECK-NEXT: store <vscale x 2 x i8> [[TMP20]], ptr [[TMP18]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -119,7 +119,6 @@ define void @pointer_induction(ptr noalias %start, i64 %N) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[TMP12:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 2 x i64> [[TMP12]], splat (i64 1)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP14]]
@@ -128,6 +127,7 @@ define void @pointer_induction(ptr noalias %start, i64 %N) {
; CHECK-NEXT: [[TMP17:%.*]] = add <vscale x 2 x i8> [[WIDE_LOAD]], splat (i8 1)
; CHECK-NEXT: store <vscale x 2 x i8> [[TMP17]], ptr [[TMP15]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX2]], [[TMP6]]
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 1, [[TMP6]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
index 6947884..2c88e0e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -239,7 +239,6 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[A]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP5]], 3
; CHECK-NEXT: [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP10:%.*]] = shl <vscale x 2 x i64> [[TMP9]], splat (i64 2)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP10]]
@@ -250,6 +249,7 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
; CHECK-NEXT: [[TMP12]] = add <vscale x 2 x i32> [[WIDE_LOAD]], [[VEC_PHI]]
; CHECK-NEXT: store <vscale x 2 x ptr> [[VECTOR_GEP]], ptr [[NEXT_GEP]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = shl nuw nsw i64 [[TMP5]], 3
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
@@ -313,7 +313,6 @@ define void @phi_used_in_vector_compare_and_scalar_indvar_update_and_store(ptr %
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[PTR:%.*]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
; CHECK-NEXT: [[TMP5:%.*]] = shl <vscale x 2 x i64> [[TMP4]], splat (i64 1)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP5]]
@@ -321,6 +320,7 @@ define void @phi_used_in_vector_compare_and_scalar_indvar_update_and_store(ptr %
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <vscale x 2 x ptr> [[VECTOR_GEP]], i64 0
; CHECK-NEXT: call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> zeroinitializer, ptr [[TMP7]], i32 2, <vscale x 2 x i1> [[TMP6]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
index a11896a..124abc6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
@@ -80,7 +80,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; DATA-NEXT: br label [[WHILE_BODY:%.*]]
; DATA: while.body:
-; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; DATA-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; DATA-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; DATA-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; DATA-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
@@ -127,7 +127,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_NO_LANEMASK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; DATA_NO_LANEMASK-NEXT: br label [[WHILE_BODY:%.*]]
; DATA_NO_LANEMASK: while.body:
-; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; DATA_NO_LANEMASK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; DATA_NO_LANEMASK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; DATA_NO_LANEMASK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; DATA_NO_LANEMASK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
@@ -169,7 +169,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; DATA_AND_CONTROL-NEXT: br label [[WHILE_BODY:%.*]]
; DATA_AND_CONTROL: while.body:
-; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; DATA_AND_CONTROL-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; DATA_AND_CONTROL-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; DATA_AND_CONTROL-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; DATA_AND_CONTROL-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
@@ -216,7 +216,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; DATA_AND_CONTROL_NO_RT_CHECK: while.body:
-; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR]], i64 [[INDEX]]
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: store i32 [[VAL]], ptr [[GEP]], align 4
; DATA_AND_CONTROL_NO_RT_CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
index d0ea828..bd6a027 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
@@ -116,7 +116,7 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) {
; VF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; VF4-NEXT: br label %[[LOOP:.*]]
; VF4: [[LOOP]]:
-; VF4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF4-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF4-NEXT: [[MUL_2:%.*]] = shl nsw i64 [[IV]], 1
; VF4-NEXT: [[DATA_0:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[MUL_2]]
; VF4-NEXT: [[L_0:%.*]] = load i64, ptr [[DATA_0]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
index 66bb80b..59e65f7 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-gather-scatter-tailpred.ll
@@ -30,7 +30,7 @@ define void @test_stride1_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 1
; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
@@ -218,7 +218,7 @@ define void @test_stride3_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 3
; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
@@ -280,7 +280,7 @@ define void @test_stride4_4i32(ptr readonly %data, ptr noalias nocapture %dst, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_023:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[I_023]], 4
; CHECK-NEXT: [[ADD5:%.*]] = add nuw nsw i32 [[MUL]], 2
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[DATA]], i32 [[ADD5]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
index 83cb325..fd94673 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reduction-types.ll
@@ -40,8 +40,8 @@ define i32 @mla_i32(ptr noalias nocapture readonly %A, ptr noalias nocapture rea
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]]
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32
@@ -120,8 +120,8 @@ define i32 @mla_i8(ptr noalias nocapture readonly %A, ptr noalias nocapture read
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_011:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RES_010:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i32 [[I_011]]
; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[TMP12]] to i32
@@ -195,8 +195,8 @@ define i32 @add_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = add nsw i32 [[TMP7]], [[R_07]]
@@ -260,8 +260,8 @@ define i32 @mul_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 1, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = mul nsw i32 [[TMP7]], [[R_07]]
@@ -325,8 +325,8 @@ define i32 @and_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ -1, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ -1, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = and i32 [[TMP7]], [[R_07]]
@@ -390,8 +390,8 @@ define i32 @or_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = or i32 [[TMP7]], [[R_07]]
@@ -455,8 +455,8 @@ define i32 @xor_i32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = xor i32 [[TMP7]], [[R_07]]
@@ -520,8 +520,8 @@ define float @fadd_f32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0.000000e+00, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fadd fast float [[TMP7]], [[R_07]]
@@ -585,8 +585,8 @@ define float @fmul_f32(ptr nocapture readonly %x, i32 %n) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 1.000000e+00, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[R_07:%.*]] = phi float [ [[ADD:%.*]], [[FOR_BODY]] ], [ 1.000000e+00, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X]], i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD]] = fmul fast float [[TMP7]], [[R_07]]
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
index 0f4d40f..8fbeff5 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/optsize_minsize.ll
@@ -393,7 +393,7 @@ define void @tail_predicate_without_optsize(ptr %p, i8 %a, i8 %b, i8 %c, i32 %n)
; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; DEFAULT-NEXT: br label %[[FOR_BODY:.*]]
; DEFAULT: [[FOR_BODY]]:
-; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; DEFAULT-NEXT: [[TMP72:%.*]] = trunc nuw nsw i64 [[INDVARS_IV]] to i8
; DEFAULT-NEXT: [[MUL:%.*]] = mul i8 [[A]], [[TMP72]]
; DEFAULT-NEXT: [[SHR:%.*]] = lshr i8 [[TMP72]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
index 5f13089..2b93668 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -45,7 +45,7 @@ define void @test_wide_integer_induction(ptr noalias %a, i64 %N) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
; CHECK-NEXT: store i64 [[IV1]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
@@ -74,16 +74,50 @@ define void @test_wide_ptr_induction(ptr noalias %a, ptr noalias %b, i64 %N) {
; CHECK-LABEL: define void @test_wide_ptr_induction(
; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], 1
+; CHECK-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP2]]
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[B]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[AVL:%.*]] = phi i64 [ [[N]], [[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; CHECK-NEXT: [[TMP6:%.*]] = mul <vscale x 2 x i64> [[TMP5]], splat (i64 8)
+; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[TMP6]]
+; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: call void @llvm.vp.store.nxv2p0.p0(<vscale x 2 x ptr> [[VECTOR_GEP]], ptr align 8 [[TMP8]], <vscale x 2 x i1> splat (i1 true), i32 [[TMP7]])
+; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP9]], [[EVL_BASED_IV]]
+; CHECK-NEXT: [[AVL_NEXT]] = sub nuw i64 [[AVL]], [[TMP9]]
+; CHECK-NEXT: [[TMP10:%.*]] = zext i32 [[TMP7]] to i64
+; CHECK-NEXT: [[TMP11:%.*]] = mul i64 8, [[TMP10]]
+; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[B]], [[ENTRY]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[VECTOR_BODY]] ], [ [[B]], [[VECTOR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[B]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[ADDR]], i64 8
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[EVL_BASED_IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store ptr [[ADDR]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw nsw i64 [[EVL_BASED_IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDEX_EVL_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
@@ -109,4 +143,6 @@ for.cond.cleanup:
; CHECK: [[META2]] = !{!"llvm.loop.isvectorized.tailfoldingstyle", !"evl"}
; CHECK: [[META3]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META3]], [[META1]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]], [[META3]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index 6e2434a..1addff6 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -151,8 +151,8 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; IF-EVL-OUTLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; IF-EVL-OUTLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]]
; IF-EVL-OUTLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; IF-EVL-OUTLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32
@@ -204,8 +204,8 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; IF-EVL-INLOOP-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; IF-EVL-INLOOP-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]]
; IF-EVL-INLOOP-NEXT: [[TMP13:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
; IF-EVL-INLOOP-NEXT: [[CONV:%.*]] = sext i16 [[TMP13]] to i32
@@ -372,8 +372,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]]
@@ -419,8 +419,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP16]], [[RDX]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
index d6f16bf..056dc7e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-cost.ll
@@ -87,6 +87,16 @@ entry:
; OPT-NF3: Cost of 7 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
; OPT-NF3: Cost of 14 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 4 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 4 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 4 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 5 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 5 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 7 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 7 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; OPT-NF3: Cost of 14 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_3'
; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
; NO-OPT: Cost of 6 for VF 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
@@ -98,6 +108,16 @@ entry:
; NO-OPT: Cost of 48 for VF 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
; NO-OPT: Cost of 96 for VF 32: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 12 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 12 for VF vscale x 2: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 24 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 24 for VF vscale x 4: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 48 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 48 for VF vscale x 8: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
+; NO-OPT: Cost of 96 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at %l0, ir<%p0>
+; NO-OPT: Cost of 96 for VF vscale x 16: INTERLEAVE-GROUP with factor 3 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.3, ptr %data, i64 %i, i32 0
@@ -135,6 +155,16 @@ entry:
; OPT-NF4: Cost of 8 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
; OPT-NF4: Cost of 16 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 5 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 5 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 5 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 6 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 6 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 8 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 8 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; OPT-NF4: Cost of 16 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_4'
; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
; NO-OPT: Cost of 8 for VF 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
@@ -146,6 +176,16 @@ entry:
; NO-OPT: Cost of 64 for VF 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
; NO-OPT: Cost of 128 for VF 32: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 16 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 16 for VF vscale x 2: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 32 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 32 for VF vscale x 4: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 64 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 64 for VF vscale x 8: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
+; NO-OPT: Cost of 128 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at %l0, ir<%p0>
+; NO-OPT: Cost of 128 for VF vscale x 16: INTERLEAVE-GROUP with factor 4 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.4, ptr %data, i64 %i, i32 0
@@ -185,6 +225,14 @@ entry:
; OPT-NF5: Cost of 9 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
; OPT-NF5: Cost of 13 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; OPT-NF5: Cost of 6 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; OPT-NF5: Cost of 7 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; OPT-NF5: Cost of 7 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; OPT-NF5: Cost of 9 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; OPT-NF5: Cost of 9 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; OPT-NF5: Cost of 13 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_5'
; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
; NO-OPT: Cost of 10 for VF 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
@@ -194,6 +242,14 @@ entry:
; NO-OPT: Cost of 40 for VF 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
; NO-OPT: Cost of 80 for VF 16: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; NO-OPT: Cost of 10 for VF vscale x 1: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; NO-OPT: Cost of 20 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; NO-OPT: Cost of 20 for VF vscale x 2: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; NO-OPT: Cost of 40 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; NO-OPT: Cost of 40 for VF vscale x 4: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
+; NO-OPT: Cost of 80 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at %l0, ir<%p0>
+; NO-OPT: Cost of 80 for VF vscale x 8: INTERLEAVE-GROUP with factor 5 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.5, ptr %data, i64 %i, i32 0
@@ -237,6 +293,14 @@ entry:
; OPT-NF6: Cost of 10 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
; OPT-NF6: Cost of 14 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; OPT-NF6: Cost of 7 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; OPT-NF6: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; OPT-NF6: Cost of 8 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; OPT-NF6: Cost of 10 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; OPT-NF6: Cost of 10 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; OPT-NF6: Cost of 14 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_6'
; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
; NO-OPT: Cost of 12 for VF 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
@@ -246,6 +310,14 @@ entry:
; NO-OPT: Cost of 48 for VF 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
; NO-OPT: Cost of 96 for VF 16: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; NO-OPT: Cost of 12 for VF vscale x 1: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; NO-OPT: Cost of 24 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; NO-OPT: Cost of 24 for VF vscale x 2: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; NO-OPT: Cost of 48 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; NO-OPT: Cost of 48 for VF vscale x 4: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
+; NO-OPT: Cost of 96 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at %l0, ir<%p0>
+; NO-OPT: Cost of 96 for VF vscale x 8: INTERLEAVE-GROUP with factor 6 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.6, ptr %data, i64 %i, i32 0
@@ -293,6 +365,14 @@ entry:
; OPT-NF7: Cost of 11 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
; OPT-NF7: Cost of 15 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; OPT-NF7: Cost of 8 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; OPT-NF7: Cost of 9 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; OPT-NF7: Cost of 9 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; OPT-NF7: Cost of 11 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; OPT-NF7: Cost of 11 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; OPT-NF7: Cost of 15 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_7'
; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
; NO-OPT: Cost of 14 for VF 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
@@ -302,6 +382,14 @@ entry:
; NO-OPT: Cost of 56 for VF 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
; NO-OPT: Cost of 112 for VF 16: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; NO-OPT: Cost of 14 for VF vscale x 1: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; NO-OPT: Cost of 28 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; NO-OPT: Cost of 28 for VF vscale x 2: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; NO-OPT: Cost of 56 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; NO-OPT: Cost of 56 for VF vscale x 4: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
+; NO-OPT: Cost of 112 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at %l0, ir<%p0>
+; NO-OPT: Cost of 112 for VF vscale x 8: INTERLEAVE-GROUP with factor 7 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.7, ptr %data, i64 %i, i32 0
@@ -353,6 +441,14 @@ entry:
; OPT-NF8: Cost of 12 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
; OPT-NF8: Cost of 16 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; OPT-NF8: Cost of 9 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; OPT-NF8: Cost of 10 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; OPT-NF8: Cost of 10 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; OPT-NF8: Cost of 12 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; OPT-NF8: Cost of 12 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; OPT-NF8: Cost of 16 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; NO-OPT-LABEL: Checking a loop in 'i8_factor_8'
; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
; NO-OPT: Cost of 16 for VF 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
@@ -362,6 +458,14 @@ entry:
; NO-OPT: Cost of 64 for VF 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
; NO-OPT: Cost of 128 for VF 16: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; NO-OPT: Cost of 16 for VF vscale x 1: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; NO-OPT: Cost of 32 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; NO-OPT: Cost of 32 for VF vscale x 2: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; NO-OPT: Cost of 64 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; NO-OPT: Cost of 64 for VF vscale x 4: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
+; NO-OPT: Cost of 128 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at %l0, ir<%p0>
+; NO-OPT: Cost of 128 for VF vscale x 8: INTERLEAVE-GROUP with factor 8 at <badref>, ir<%p0>
for.body:
%i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
%p0 = getelementptr inbounds %i8.8, ptr %data, i64 %i, i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
index 0a87257..32cb426 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/low-trip-count.ll
@@ -146,7 +146,7 @@ define void @trip8_i8(ptr noalias nocapture noundef %dst, ptr noalias nocapture
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 [[I_08]]
; CHECK-NEXT: [[TMP15:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[MUL:%.*]] = shl i8 [[TMP15]], 1
@@ -379,8 +379,8 @@ define i8 @mul_non_pow_2_low_trip_count(ptr noalias %a) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i8 [ 2, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi i8 [ 2, [[SCALAR_PH]] ], [ [[MUL:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load i8, ptr [[GEP]], align 1
; CHECK-NEXT: [[MUL]] = mul i8 [[TMP5]], [[RDX]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
index 0afe04e..07a7b7b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
@@ -1,29 +1,42 @@
-; RUN: opt -passes=loop-vectorize \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
-; RUN: -mtriple=riscv64 -mattr=+v -S -debug %s 2>&1 | FileCheck %s
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S -debug %s 2>&1 | FileCheck %s
; REQUIRES: asserts
-; Make sure we do not vectorize a loop with a widened pointer induction.
-define void @test_wide_pointer_induction(ptr noalias %a, i64 %N) {
+; For %for.1, we are fine initially, because the previous value %for.1.next dominates the
+; user of %for.1. But for %for.2, we have to sink the user (%for.1.next) past the previous
+; value %for.2.next. This, however, breaks the condition we have for %for.1. We cannot fix
+; both first-order recurrences, so we cannot vectorize the loop.
+;
+; Make sure we don't compute costs if there are no vector VPlans.
+
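+; Schematically, the two recurrences in the test below constrain each other in
+; opposite directions (a restatement of the loop body, to make the sinking
+; constraint concrete):
+;   %for.1.next = add nsw i32 %for.2, 1   ; previous value of %for.1, user of %for.2
+;   %for.2.next = shl i32 %for.1, 24      ; previous value of %for.2, user of %for.1
+; %for.1 requires %for.1.next to stay above %for.2.next; %for.2 requires the
+; reverse, so no ordering satisfies both.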
; CHECK-NOT: LV: Vector loop of width {{.+}} costs:
;
-; CHECK: define void @test_wide_pointer_induction(
+; CHECK: define i32 @test(
; CHECK-NOT: vector.body
;
+define i32 @test(i32 %N) {
entry:
- br label %loop
+ br label %for.body
-loop:
- %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
- %iv.ptr = phi ptr [ %a, %entry ], [ %iv.ptr.next, %loop ]
- %arrayidx = getelementptr inbounds i64, ptr %a, i64 %iv
- store ptr %iv.ptr, ptr %arrayidx, align 8
- %iv.next = add nuw nsw i64 %iv, 1
- %iv.ptr.next = getelementptr i64, ptr %iv.ptr, i32 1
- %exitcond.not = icmp eq i64 %iv.next, %N
- br i1 %exitcond.not, label %exit, label %loop
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ %inc, %for.body ], [ 10, %entry ]
+ %for.1 = phi i32 [ %for.1.next, %for.body ], [ 20, %entry ]
+ %for.2 = phi i32 [ %for.2.next, %for.body ], [ 11, %entry ]
+ %for.1.next = add nsw i32 %for.2, 1
+ %for.2.next = shl i32 %for.1, 24
+ %inc = add nsw i32 %iv, 1
+ %exitcond = icmp eq i32 %inc, %N
+ br i1 %exitcond, label %for.cond1.for.end_crit_edge, label %for.body
-exit:
- ret void
+for.cond1.for.end_crit_edge: ; preds = %for.body
+ %add.lcssa = phi i32 [ %for.1.next, %for.body ]
+ %sext.lcssa = phi i32 [ %for.2.next, %for.body ]
+ %res = add i32 %add.lcssa, %sext.lcssa
+ ret i32 %res
}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
index 01df436..d41d47a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr88802.ll
@@ -58,7 +58,7 @@ define void @test(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_COND1:%.*]]
; CHECK: for.cond:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH1]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH1]] ], [ [[ADD:%.*]], [[FOR_BODY:%.*]] ]
; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1
; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index ed50796..c037b70 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -41,7 +41,7 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
@@ -106,7 +106,7 @@ define void @indexed_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
@@ -172,8 +172,8 @@ define i64 @indexed_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[BADDR:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
; CHECK-NEXT: [[AIDX:%.*]] = load i64, ptr [[BADDR]], align 8
; CHECK-NEXT: [[AADDR:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[AIDX]]
@@ -238,7 +238,7 @@ define void @splat_int(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -296,7 +296,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -417,7 +417,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[ELEM:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[ELEM]], [[V]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index 9e492c6..df907dc 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -170,7 +170,6 @@ define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]]
; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP16:%.*]] = mul <vscale x 4 x i64> [[TMP14]], splat (i64 8)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP16]]
@@ -181,6 +180,7 @@ define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[TMP19]], splat (i32 1)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP8]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -753,13 +753,11 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI11:%.*]] = phi ptr [ [[P2]], [[VECTOR_PH]] ], [ [[PTR_IND12:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP18:%.*]] = mul <vscale x 4 x i64> [[TMP19]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI11]], <vscale x 4 x i64> [[TMP18]]
-; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i64> [[TMP27]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP7:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP21]]
@@ -767,7 +765,9 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; STRIDED-NEXT: [[TMP30:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META15]]
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
+; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP25]]
+; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP13]]
; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP17]]
; STRIDED-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; STRIDED-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index ce2b790..2be74e5 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -1330,7 +1330,7 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; IF-EVL-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64
; IF-EVL-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index d02d53b..76a830a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -57,8 +57,8 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP27]], 3
@@ -108,8 +108,8 @@ define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3
@@ -285,8 +285,8 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP28]], 3
@@ -339,8 +339,8 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[FOR_INC]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP25:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP25]], 3
@@ -537,8 +537,8 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-OUTLOOP-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-OUTLOOP-NEXT: [[TMP37:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
@@ -597,8 +597,8 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-INLOOP-NEXT: [[RDX1:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-INLOOP-NEXT: [[TMP28:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
@@ -804,8 +804,8 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX1:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-OUTLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-OUTLOOP: for.body:
-; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
-; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX1]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
+; IF-EVL-OUTLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
+; IF-EVL-OUTLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
; IF-EVL-OUTLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; IF-EVL-OUTLOOP-NEXT: [[TMP38:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; IF-EVL-OUTLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32
@@ -867,8 +867,8 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX1:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-INLOOP-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL-INLOOP: for.body:
-; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
-; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX1]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
+; IF-EVL-INLOOP-NEXT: [[IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[MIDDLE_BLOCK:%.*]] ]
+; IF-EVL-INLOOP-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[RDX_ADD:%.*]], [[MIDDLE_BLOCK]] ]
; IF-EVL-INLOOP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; IF-EVL-INLOOP-NEXT: [[TMP35:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; IF-EVL-INLOOP-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV1]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
index ae047f5..a216aa8 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
@@ -45,7 +45,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -166,7 +166,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -286,7 +286,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -406,7 +406,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; IF-EVL-NEXT: br label %[[LOOP:.*]]
; IF-EVL: [[LOOP]]:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; IF-EVL-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP16:%.*]] = load i64, ptr [[A_GEP]], align 8
; IF-EVL-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
index 987f946..f92bf5a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
@@ -53,8 +53,8 @@ define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 33, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP24:%.*]], %[[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
; IF-EVL-NEXT: [[TMP24]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP24]]
@@ -192,9 +192,9 @@ define void @second_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[SCALAR_RECUR_INIT3:%.*]] = phi i32 [ 22, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT3]], %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP31:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
; IF-EVL-NEXT: [[TMP31]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[FOR2]]
@@ -353,10 +353,10 @@ define void @third_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-NEXT: [[SCALAR_RECUR_INIT6:%.*]] = phi i32 [ 11, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT5]], %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT6]], %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[SCALAR_PH]] ], [ [[TMP38:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR2:%.*]] = phi i32 [ 22, %[[SCALAR_PH]] ], [ [[FOR1]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR3:%.*]] = phi i32 [ 11, %[[SCALAR_PH]] ], [ [[FOR2]], %[[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
; IF-EVL-NEXT: [[TMP38]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR2]], [[FOR3]]
@@ -666,8 +666,8 @@ define void @first_order_recurrence_indvar(ptr noalias %A, i64 %TC) {
; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ 33, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV1_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i64 [ 33, %[[SCALAR_PH]] ], [ [[TMP14:%.*]], %[[FOR_BODY]] ]
; IF-EVL-NEXT: [[TMP14]] = add i64 [[IV1]], 42
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i64, ptr [[A]], i64 [[IV1]]
; IF-EVL-NEXT: store i64 [[FOR1]], ptr [[ARRAYIDX]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index 2aeb1d0..da5aed9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -51,7 +51,7 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY1:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV1]]
; IF-EVL-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP0]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
index 3e23df7..433d1e4 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
@@ -44,8 +44,8 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]]
@@ -259,8 +259,8 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]]
@@ -367,8 +367,8 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]]
@@ -475,8 +475,8 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]]
@@ -583,8 +583,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP17]], [[RDX]]
@@ -694,8 +694,8 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP17]], [[RDX]]
@@ -805,8 +805,8 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP17]], [[RDX]]
@@ -916,8 +916,8 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP17]], [[RDX]]
@@ -1027,8 +1027,8 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]]
@@ -1243,8 +1243,8 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP17]], [[RDX]]
@@ -1356,8 +1356,8 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP17]], [[RDX]]
@@ -1687,8 +1687,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1807,8 +1807,8 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP21]], 3
@@ -1924,8 +1924,8 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP21]], 3.000000e+00
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index 8d987a9..c5d2739 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -50,7 +50,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 0
; IF-EVL-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
index d474a03..62a4f73 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
@@ -39,7 +39,7 @@ define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY1:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV1]]
; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV1]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
index 06c6bfe..296405d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
@@ -44,7 +44,7 @@ define void @trip_count_max_1024(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
@@ -113,7 +113,7 @@ define void @overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
@@ -182,7 +182,7 @@ define void @no_overflow_at_0(ptr %p, i64 %tc) vscale_range(2, 1024) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[I]]
; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[GEP]], align 8
; CHECK-NEXT: [[Y:%.*]] = add i64 [[X]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
index 5f407fc..e06bbe9 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
@@ -43,7 +43,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[SCALAR_PH]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_011]]
; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP23]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
index 59d1370..775d9ca 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
@@ -43,8 +43,8 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = fadd float [[TMP17]], [[SUM_07]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
index 2d5718b..464667d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
@@ -44,8 +44,8 @@ define i32 @add(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[TMP18]], [[RDX]]
@@ -262,8 +262,8 @@ define i32 @or(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[OR:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[OR]] = or i32 [[TMP18]], [[RDX]]
@@ -373,8 +373,8 @@ define i32 @and(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[AND:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[AND]] = and i32 [[TMP18]], [[RDX]]
@@ -484,8 +484,8 @@ define i32 @xor(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[XOR:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[XOR]] = xor i32 [[TMP18]], [[RDX]]
@@ -597,8 +597,8 @@ define i32 @smin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP19]], [[RDX]]
@@ -715,8 +715,8 @@ define i32 @smax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[SMAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[TMP19]], [[RDX]]
@@ -833,8 +833,8 @@ define i32 @umin(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ult i32 [[TMP19]], [[RDX]]
@@ -951,8 +951,8 @@ define i32 @umax(ptr %a, i64 %n, i32 %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[UMAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp ugt i32 [[TMP19]], [[RDX]]
@@ -1067,8 +1067,8 @@ define float @fadd(ptr %a, i64 %n, float %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ADD]] = fadd reassoc float [[TMP18]], [[RDX]]
@@ -1287,8 +1287,8 @@ define float @fmin(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MIN:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast olt float [[TMP19]], [[RDX]]
@@ -1405,8 +1405,8 @@ define float @fmax(ptr %a, i64 %n, float %start) #0 {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MAX:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP:%.*]] = fcmp fast ogt float [[TMP19]], [[RDX]]
@@ -1739,8 +1739,8 @@ define float @fmuladd(ptr %a, ptr %b, i64 %n, float %start) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi float [ [[START]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
@@ -1859,8 +1859,8 @@ define i32 @anyof_icmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = icmp slt i32 [[TMP20]], 3
@@ -1976,8 +1976,8 @@ define i32 @anyof_fcmp(ptr %a, i64 %n, i32 %start, i32 %inv) {
; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[START]], [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[RDX:%.*]] = phi i32 [ [[START]], [[SCALAR_PH]] ], [ [[ANYOF:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[CMP_I:%.*]] = fcmp fast olt float [[TMP20]], 3.000000e+00
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index e2db28d..397cb95 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -57,8 +57,8 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1
; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]]
; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4
@@ -205,8 +205,8 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ]
-; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ]
+; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ]
+; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC]] ]
; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1
; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]]
; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4
@@ -388,7 +388,7 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[LOOP:%.*]]
; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; IF-EVL-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
; IF-EVL-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1
; IF-EVL-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index 1c78b25..2ec23b91 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -44,7 +44,7 @@ define void @test(ptr %p) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[LOOP:%.*]]
; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 8
; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200
@@ -375,7 +375,7 @@ define void @trivial_due_max_vscale(ptr %p) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[LOOP:%.*]]
; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192
@@ -483,7 +483,7 @@ define void @no_high_lmul_or_interleave(ptr %p) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[LOOP:%.*]]
; IF-EVL: loop:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; IF-EVL-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]]
; IF-EVL-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
; IF-EVL-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index 687a2e7..ab05166 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -50,7 +50,7 @@ define void @lshift_significand(i32 %n, ptr nocapture writeonly %dst) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[SPEC_SELECT]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[TMP22:%.*]] = sub nuw nsw i64 1, [[IV1]]
; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP22]]
; CHECK-NEXT: store i64 0, ptr [[ARRAYIDX14]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
index 24649729..034b767 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-cost.ll
@@ -179,7 +179,7 @@ define void @truncate_to_i1_used_by_branch(i8 %x, ptr %dst) #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[F_039:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[TMP4:%.*]] = or i8 23, [[X]]
; CHECK-NEXT: [[EXTRACT_T:%.*]] = trunc i8 [[TMP4]] to i1
; CHECK-NEXT: br i1 [[EXTRACT_T]], label %[[THEN:.*]], label %[[LOOP_LATCH]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
index dfdc893..01edeed 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
@@ -36,7 +36,7 @@ define void @truncate_to_minimal_bitwidths_widen_cast_recipe(ptr %src) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC1:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV1]]
; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[GEP_SRC1]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index 568aa95..d97e93d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -117,7 +117,7 @@ define void @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i6
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -439,7 +439,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_LOAD:.*]], label %[[LATCH]]
; TF-SCALABLE: [[DO_LOAD]]:
@@ -589,7 +589,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -726,7 +726,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -890,7 +890,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: store i64 [[IV]], ptr [[B]], align 8
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
@@ -1068,7 +1068,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
; TF-SCALABLE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[IV]], 10
; TF-SCALABLE-NEXT: br i1 [[CMP]], label %[[DO_STORE:.*]], label %[[LATCH]]
; TF-SCALABLE: [[DO_STORE]]:
@@ -1216,7 +1216,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[B]], align 1
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
index 7c1ec9a..d93a5c0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vector-loop-backedge-elimination-with-evl.ll
@@ -27,7 +27,7 @@ define void @foo(ptr %arg) #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr [3 x i64], ptr [[ARG]], i64 0, i64 [[IV]]
; CHECK-NEXT: store i64 0, ptr [[GEP]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
index 85116fe..d3c3c6b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
@@ -43,7 +43,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
index 082e326..0fb4655 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/force-target-instruction-cost.ll
@@ -42,7 +42,7 @@ define void @test_scalar_steps_target_instruction_cost(ptr %dst) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[IV]]
; CHECK-NEXT: store i64 [[IV]], ptr [[GEP]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 3
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
index 02a876a..d7cc6f0 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/pr47665.ll
@@ -96,7 +96,7 @@ define void @test(ptr %p, i40 %a) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[SHL:%.*]] = shl i40 [[A]], 24
; CHECK-NEXT: [[ASHR:%.*]] = ashr i40 [[SHL]], 28
; CHECK-NEXT: [[TRUNC:%.*]] = trunc i40 [[ASHR]] to i32
diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
index e0fc73f..4e46a29 100644
--- a/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/SystemZ/predicated-first-order-recurrence.ll
@@ -69,8 +69,8 @@ define void @func_21() {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[LV:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[SCALAR_RECUR:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[LV:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[A_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @A, i64 0, i64 [[INDVARS_IV]]
; CHECK-NEXT: [[LV]] = load i32, ptr [[A_PTR]], align 4
; CHECK-NEXT: [[B_PTR:%.*]] = getelementptr inbounds [5 x i32], ptr @B, i64 0, i64 [[INDVARS_IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
index c61b1b9..37493d1 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/constant-fold.ll
@@ -117,7 +117,7 @@ define void @redundant_or_1(ptr %dst, i1 %c.0, i1 %c.1) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: br i1 [[C_0]], label [[LOOP_LATCH]], label [[THEN_1:%.*]]
; CHECK: then.1:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2
@@ -220,7 +220,7 @@ define void @redundant_or_2(ptr %dst, i1 %c.0, i1 %c.1) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: br i1 [[C_1]], label [[LOOP_LATCH]], label [[THEN_1:%.*]]
; CHECK: then.1:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[IV]], 2
diff --git a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
index 85b475c..1a3ff6c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/cost-model.ll
@@ -1055,8 +1055,8 @@ define i64 @live_in_known_1_via_scev() {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ 3, [[PH]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 3, [[SCALAR_PH]] ], [ [[RED_MUL:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[RED_MUL]] = mul nsw i64 [[RED]], [[P_EXT]]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
index 1249df4..ee85e0e 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
@@ -46,8 +46,8 @@ define i1 @fn(ptr %nno) #0 {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY20:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ]
-; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 10, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC35:%.*]] ]
+; CHECK-NEXT: [[SUM_01:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[SUM_1:%.*]], [[FOR_INC35]] ]
; CHECK-NEXT: [[REM4:%.*]] = and i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i64 [[REM4]], 0
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[INDVARS_IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
index fe2ad66..07b130b 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/fixed-order-recurrence.ll
@@ -507,8 +507,8 @@ define void @test_first_order_recurrence_tried_to_scalarized(ptr %dst, i1 %c, i3
; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ 4, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[FOR:%.*]] = phi i32 [ 4, [[SCALAR_PH]] ], [ [[IV]], [[LOOP]] ]
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[FOR]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds nuw i32, ptr [[DST]], i32 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
index fcd94f4..a66800c 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/induction-costs.ll
@@ -623,7 +623,7 @@ define void @wide_iv_trunc(ptr %dst, i64 %N) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[IV_TRUNC:%.*]] = trunc i64 [[IV]] to i32
; CHECK-NEXT: store i32 [[IV_TRUNC]], ptr [[DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
index 07e2df3..c5ac0ae 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/optsize.ll
@@ -35,7 +35,7 @@ define i32 @foo_optsize() #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
@@ -72,7 +72,7 @@ define i32 @foo_optsize() #0 {
; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; AUTOVF-NEXT: br label [[FOR_BODY:%.*]]
; AUTOVF: for.body:
-; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
@@ -131,7 +131,7 @@ define i32 @foo_minsize() #1 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
@@ -168,7 +168,7 @@ define i32 @foo_minsize() #1 {
; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; AUTOVF-NEXT: br label [[FOR_BODY:%.*]]
; AUTOVF: for.body:
-; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; AUTOVF-NEXT: [[I_08:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; AUTOVF-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32 x i8], ptr @tab, i32 0, i32 [[I_08]]
; AUTOVF-NEXT: [[TMP7:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
; AUTOVF-NEXT: [[CMP1:%.*]] = icmp eq i8 [[TMP7]], 0
@@ -379,7 +379,7 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[START]], [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72
; CHECK-NEXT: store ptr null, ptr [[PTR_IV]], align 8
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
@@ -423,7 +423,7 @@ define void @tail_folded_store_avx512(ptr %start, ptr %end) #3 {
; AUTOVF-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[START]], [[ENTRY:%.*]] ]
; AUTOVF-NEXT: br label [[LOOP:%.*]]
; AUTOVF: loop:
-; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
+; AUTOVF-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[START]], [[SCALAR_PH]] ], [ [[PTR_IV_NEXT:%.*]], [[LOOP]] ]
; AUTOVF-NEXT: [[PTR_IV_NEXT]] = getelementptr nusw i8, ptr [[PTR_IV]], i64 -72
; AUTOVF-NEXT: store ptr null, ptr [[PTR_IV]], align 8
; AUTOVF-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV_NEXT]], [[END]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
index 08adfdd..11c5e39 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr81872.ll
@@ -44,7 +44,7 @@ define void @test(ptr noundef align 8 dereferenceable_or_null(16) %arr) #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 99, [[BB5:%.*]] ]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 99, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
; CHECK-NEXT: [[AND:%.*]] = and i64 [[IV]], 1
; CHECK-NEXT: [[ICMP17:%.*]] = icmp eq i64 [[AND]], 0
; CHECK-NEXT: br i1 [[ICMP17]], label [[BB18:%.*]], label [[LOOP_LATCH]], !prof [[PROF5:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
index 440f6e1..4145967 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scev-checks-unprofitable.ll
@@ -53,7 +53,7 @@ define void @value_defined_in_loop1_used_for_trip_counts(i32 %start, i1 %c, ptr
; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i64 [[IV_2]], [[IV_1_LCSSA]]
; CHECK-NEXT: br i1 [[EC_2]], label %[[LOOP_2]], label %[[EXIT_1_LOOPEXIT:.*]]
; CHECK: [[LOOP_3]]:
-; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV_4:%.*]] = phi i64 [ [[IV_4_NEXT:%.*]], %[[LOOP_3]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP_DST_2:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_4]]
; CHECK-NEXT: store i8 0, ptr [[GEP_DST_2]], align 1
; CHECK-NEXT: [[IV_4_NEXT]] = add i64 [[IV_4]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
index 5e35c4a..9a81fae 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/tail_loop_folding.ll
@@ -35,7 +35,7 @@ define dso_local void @tail_folding_enabled(ptr noalias nocapture %A, ptr noalia
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
@@ -99,7 +99,7 @@ define dso_local void @tail_folding_disabled(ptr noalias nocapture %A, ptr noali
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDVARS_IV]]
@@ -181,8 +181,8 @@ define i32 @reduction_i32(ptr nocapture readonly %A, ptr nocapture readonly %B,
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[SUM_0:%.*]] = phi i32 [ [[SUM_1:%.*]], [[FOR_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[ARRAYIDXA]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
index f7eba42..a926ff4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
@@ -146,7 +146,7 @@ define void @vectorized1(ptr noalias nocapture %A, ptr noalias nocapture readonl
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP7]]
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
index 59f2925..e7fa655 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll
@@ -43,7 +43,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
index e9d85c2..f4fe120 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-interleaved-accesses-gap.ll
@@ -79,7 +79,7 @@ define void @test_pr59090(ptr %l_out, ptr noalias %b) #0 {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_MUL:%.*]] = mul nuw i64 [[IV]], 6
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]]
; CHECK-NEXT: store i8 [[L]], ptr [[B]], align 1, !llvm.access.group [[ACC_GRP0]]
diff --git a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
index 42d45bd..8ac33a1 100644
--- a/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/dead_instructions.ll
@@ -102,9 +102,9 @@ define void @pr47390(ptr %a) {
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ]
-; CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[PRIMARY:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[PRIMARY_ADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[USE_PRIMARY:%.*]] = phi i32 [ -1, %[[SCALAR_PH]] ], [ [[PRIMARY]], %[[LOOP]] ]
+; CHECK-NEXT: [[SECONDARY:%.*]] = phi i32 [ 1, %[[SCALAR_PH]] ], [ [[SECONDARY_ADD:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[PRIMARY_ADD]] = add i32 [[PRIMARY]], 1
; CHECK-NEXT: [[SECONDARY_ADD]] = add i32 [[SECONDARY]], 1
; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[SECONDARY]]
diff --git a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll
index c8cf2ad..9852f53 100644
--- a/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/dereferenceable-info-from-assumption-variable-size.ll
@@ -540,3 +540,230 @@ loop.latch:
exit:
ret void
}
+
+; The SCEV for the start of the access has a non-constant offset because the
+; IV starts at the variable `iv.start`.
+define void @deref_assumption_loop_access_start_variable(i8 %v, ptr noundef %P, i64 range(i64 0, 2000) %N, ptr noalias %b, ptr noalias %c, i64 range(i64 0, 2000) %iv.start) nofree nosync {
+; CHECK-LABEL: define void @deref_assumption_loop_access_start_variable(
+; CHECK-SAME: i8 [[V:%.*]], ptr noundef [[P:%.*]], i64 range(i64 0, 2000) [[N:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 range(i64 0, 2000) [[IV_START:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[A:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV_START]], [[N]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[ADD:%.*]] = add i64 [[MUL]], 16
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[ADD]]) ]
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[N]], [[IV_START]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP3]], 2
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[IV_START]], [[N_VEC]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_START]], [[INDEX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP6]], align 1
+; CHECK-NEXT: [[TMP8:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[TMP8]], splat (i1 true)
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; CHECK: [[PRED_LOAD_IF]]:
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP19]], i32 0
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; CHECK: [[PRED_LOAD_CONTINUE]]:
+; CHECK-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_IF1]]:
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP14]], i32 1
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_CONTINUE2]]:
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP15]], %[[PRED_LOAD_IF1]] ]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP8]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD1]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP17]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ [[IV_START]], %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 1
+; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0
+; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]]
+; CHECK: [[LOOP_THEN]]:
+; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1
+; CHECK-NEXT: br label %[[LOOP_LATCH]]
+; CHECK: [[LOOP_LATCH]]:
+; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[TERM_COND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TERM_COND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+
+entry:
+ %a = getelementptr i8, ptr %P, i64 16
+ %cmp = icmp slt i64 %iv.start, %N
+ call void @llvm.assume(i1 %cmp)
+ %mul = mul i64 %N, 4
+ %add = add i64 %mul, 16
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %P, i64 %add) ]
+ br label %loop
+
+loop:                                             ; preds = %entry, %loop.latch
+ %iv = phi i64 [ %iv.next, %loop.latch ], [ %iv.start, %entry ]
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ %l.b = load i32, ptr %gep.b, align 1
+ %c.1 = icmp sge i32 %l.b, 0
+ br i1 %c.1, label %loop.latch, label %loop.then
+
+loop.then: ; preds = %loop
+ %l.a = load i32, ptr %gep.a, align 1
+ br label %loop.latch
+
+loop.latch: ; preds = %loop.then, %loop
+ %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop ]
+ %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv
+ store i32 %merge, ptr %gep.c, align 1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %term.cond = icmp slt i64 %iv.next, %N
+ br i1 %term.cond, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+; Same as the previous test, but `iv.start` is not known to be nonnegative.
+define void @deref_assumption_loop_access_start_variable_unknown_range(i8 %v, ptr noundef %P, i64 range(i64 0, 2000) %N, ptr noalias %b, ptr noalias %c, i64 %iv.start) nofree nosync {
+; CHECK-LABEL: define void @deref_assumption_loop_access_start_variable_unknown_range(
+; CHECK-SAME: i8 [[V:%.*]], ptr noundef [[P:%.*]], i64 range(i64 0, 2000) [[N:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]], i64 [[IV_START:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[A:%.*]] = getelementptr i8, ptr [[P]], i64 16
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[IV_START]], [[N]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[MUL:%.*]] = mul i64 [[N]], 4
+; CHECK-NEXT: [[ADD:%.*]] = add i64 [[MUL]], 16
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "dereferenceable"(ptr [[P]], i64 [[ADD]]) ]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[N]], [[IV_START]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 2
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 2
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[IV_START]], [[N_VEC]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_LOAD_CONTINUE2:.*]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[IV_START]], [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sge <2 x i32> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[TMP3]], splat (i1 true)
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
+; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_LOAD_IF:.*]], label %[[PRED_LOAD_CONTINUE:.*]]
+; CHECK: [[PRED_LOAD_IF]]:
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i32> poison, i32 [[TMP8]], i32 0
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE]]
+; CHECK: [[PRED_LOAD_CONTINUE]]:
+; CHECK-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ poison, %[[VECTOR_BODY]] ], [ [[TMP9]], %[[PRED_LOAD_IF]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
+; CHECK-NEXT: br i1 [[TMP11]], label %[[PRED_LOAD_IF1:.*]], label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_IF1]]:
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 1
+; CHECK-NEXT: [[TMP15:%.*]] = insertelement <2 x i32> [[TMP10]], i32 [[TMP14]], i32 1
+; CHECK-NEXT: br label %[[PRED_LOAD_CONTINUE2]]
+; CHECK: [[PRED_LOAD_CONTINUE2]]:
+; CHECK-NEXT: [[TMP16:%.*]] = phi <2 x i32> [ [[TMP10]], %[[PRED_LOAD_CONTINUE]] ], [ [[TMP15]], %[[PRED_LOAD_IF1]] ]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP3]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: store <2 x i32> [[PREDPHI]], ptr [[TMP17]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ [[IV_START]], %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 1
+; CHECK-NEXT: [[C_1:%.*]] = icmp sge i32 [[L_B]], 0
+; CHECK-NEXT: br i1 [[C_1]], label %[[LOOP_LATCH]], label %[[LOOP_THEN:.*]]
+; CHECK: [[LOOP_THEN]]:
+; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 1
+; CHECK-NEXT: br label %[[LOOP_LATCH]]
+; CHECK: [[LOOP_LATCH]]:
+; CHECK-NEXT: [[MERGE:%.*]] = phi i32 [ [[L_A]], %[[LOOP_THEN]] ], [ [[L_B]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_C:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
+; CHECK-NEXT: store i32 [[MERGE]], ptr [[GEP_C]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[TERM_COND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[TERM_COND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %a = getelementptr i8, ptr %P, i64 16
+ %cmp = icmp slt i64 %iv.start, %N
+ call void @llvm.assume(i1 %cmp)
+ %mul = mul i64 %N, 4
+ %add = add i64 %mul, 16
+ call void @llvm.assume(i1 true) [ "dereferenceable"(ptr %P, i64 %add) ]
+ br label %loop
+
+loop:                                             ; preds = %entry, %loop.latch
+ %iv = phi i64 [ %iv.next, %loop.latch ], [ %iv.start, %entry ]
+ %gep.a = getelementptr inbounds i32, ptr %a, i64 %iv
+ %gep.b = getelementptr inbounds i32, ptr %b, i64 %iv
+ %l.b = load i32, ptr %gep.b, align 1
+ %c.1 = icmp sge i32 %l.b, 0
+ br i1 %c.1, label %loop.latch, label %loop.then
+
+loop.then: ; preds = %loop
+ %l.a = load i32, ptr %gep.a, align 1
+ br label %loop.latch
+
+loop.latch: ; preds = %loop.then, %loop
+ %merge = phi i32 [ %l.a, %loop.then ], [ %l.b, %loop ]
+ %gep.c = getelementptr inbounds i32, ptr %c, i64 %iv
+ store i32 %merge, ptr %gep.c, align 1
+ %iv.next = add nuw nsw i64 %iv, 1
+ %term.cond = icmp slt i64 %iv.next, %N
+ br i1 %term.cond, label %loop, label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
index 1936b40..d666487 100644
--- a/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
+++ b/llvm/test/Transforms/LoopVectorize/dont-fold-tail-for-divisible-TC.ll
@@ -203,7 +203,7 @@ define dso_local void @cannotProveAlignedTC(ptr noalias nocapture %A, i32 %p, i3
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[RIVPLUS1:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
index 3adfcf5..db97bdf 100644
--- a/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/first-order-recurrence.ll
@@ -2750,9 +2750,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: ret i32 [[VAR]]
; UNROLL-NO-IC: bb2:
-; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
; UNROLL-NO-IC-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
@@ -2813,9 +2813,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-VF-NEXT: ret i32 [[VAR]]
; UNROLL-NO-VF: bb2:
-; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
; UNROLL-NO-VF-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
@@ -2899,9 +2899,9 @@ define i32 @sink_into_replication_region(i32 %y) {
; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP27]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: ret i32 [[VAR]]
; SINK-AFTER: bb2:
-; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
; SINK-AFTER-NEXT: [[VAR8]] = add nsw i32 [[VAR3]], -1
@@ -3113,10 +3113,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; UNROLL-NO-IC-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP75]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-IC-NEXT: ret i32 [[VAR]]
; UNROLL-NO-IC: bb2:
-; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-IC-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; UNROLL-NO-IC-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
; UNROLL-NO-IC-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; UNROLL-NO-IC-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
@@ -3194,10 +3194,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; UNROLL-NO-VF-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[BIN_RDX]], [[MIDDLE_BLOCK]] ]
; UNROLL-NO-VF-NEXT: ret i32 [[VAR]]
; UNROLL-NO-VF: bb2:
-; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; UNROLL-NO-VF-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; UNROLL-NO-VF-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
; UNROLL-NO-VF-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; UNROLL-NO-VF-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
@@ -3316,10 +3316,10 @@ define i32 @sink_into_replication_region_multiple(ptr %x, i32 %y) {
; SINK-AFTER-NEXT: [[VAR:%.*]] = phi i32 [ [[VAR6:%.*]], [[BB2]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ]
; SINK-AFTER-NEXT: ret i32 [[VAR]]
; SINK-AFTER: bb2:
-; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ]
-; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR3:%.*]] = phi i32 [ [[VAR8:%.*]], [[BB2]] ], [ [[Y]], [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR4:%.*]] = phi i32 [ [[VAR7:%.*]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
+; SINK-AFTER-NEXT: [[VAR5:%.*]] = phi i32 [ [[VAR6]], [[BB2]] ], [ 0, [[SCALAR_PH]] ]
; SINK-AFTER-NEXT: [[G:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[IV]]
; SINK-AFTER-NEXT: [[VAR6]] = add i32 [[VAR5]], [[VAR4]]
; SINK-AFTER-NEXT: [[VAR7]] = udiv i32 219220132, [[VAR3]]
diff --git a/llvm/test/Transforms/LoopVectorize/intrinsic.ll b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
index 9c910d7..10d83a4 100644
--- a/llvm/test/Transforms/LoopVectorize/intrinsic.ll
+++ b/llvm/test/Transforms/LoopVectorize/intrinsic.ll
@@ -324,6 +324,56 @@ for.end: ; preds = %for.body, %entry
declare double @llvm.exp2.f64(double)
+define void @ldexp_f32i32(i32 %n, ptr %y, ptr %x, i32 %exp) {
+; CHECK-LABEL: @ldexp_f32i32(
+; CHECK: llvm.ldexp.v4f32.v4i32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %call = tail call float @llvm.ldexp.f32.i32(float %0, i32 %exp)
+ %arrayidx2 = getelementptr inbounds float, ptr %x, i32 %iv
+ store float %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare float @llvm.ldexp.f32.i32(float, i32)
+
+define void @ldexp_f64i32(i32 %n, ptr %y, ptr %x, i32 %exp) {
+; CHECK-LABEL: @ldexp_f64i32(
+; CHECK: llvm.ldexp.v4f64.v4i32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
+ %0 = load double, ptr %arrayidx, align 8
+ %call = tail call double @llvm.ldexp.f64.i32(double %0, i32 %exp)
+ %arrayidx2 = getelementptr inbounds double, ptr %x, i32 %iv
+ store double %call, ptr %arrayidx2, align 8
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare double @llvm.ldexp.f64.i32(double, i32)
+
define void @log_f32(i32 %n, ptr %y, ptr %x) {
; CHECK-LABEL: @log_f32(
; CHECK: llvm.log.v4f32
@@ -976,6 +1026,157 @@ for.end: ; preds = %for.body, %entry
declare double @llvm.roundeven.f64(double)
+
+define void @lround_i32f32(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @lround_i32f32(
+; CHECK: llvm.lround.v4i32.v4f32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %call = tail call i32 @llvm.lround.i32.f32(float %0)
+ %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv
+ store i32 %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i32 @llvm.lround.i32.f32(float)
+
+define void @lround_i32f64(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @lround_i32f64(
+; CHECK: llvm.lround.v4i32.v4f64
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
+ %0 = load double, ptr %arrayidx, align 8
+ %call = tail call i32 @llvm.lround.i32.f64(double %0)
+ %arrayidx2 = getelementptr inbounds i32, ptr %x, i32 %iv
+  store i32 %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i32 @llvm.lround.i32.f64(double)
+
+define void @lround_i64f32(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @lround_i64f32(
+; CHECK: llvm.lround.v4i64.v4f32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %call = tail call i64 @llvm.lround.i64.f32(float %0)
+ %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
+ store i64 %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.lround.i64.f32(float)
+
+define void @lround_i64f64(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @lround_i64f64(
+; CHECK: llvm.lround.v4i64.v4f64
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
+ %0 = load double, ptr %arrayidx, align 8
+ %call = tail call i64 @llvm.lround.i64.f64(double %0)
+ %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
+ store i64 %call, ptr %arrayidx2, align 8
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.lround.i64.f64(double)
+
+define void @llround_i64f32(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @llround_i64f32(
+; CHECK: llvm.llround.v4i64.v4f32
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds float, ptr %y, i32 %iv
+ %0 = load float, ptr %arrayidx, align 4
+ %call = tail call i64 @llvm.llround.i64.f32(float %0)
+ %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
+ store i64 %call, ptr %arrayidx2, align 4
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.llround.i64.f32(float)
+
+define void @llround_i64f64(i32 %n, ptr %y, ptr %x) {
+; CHECK-LABEL: @llround_i64f64(
+; CHECK: llvm.llround.v4i64.v4f64
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds double, ptr %y, i32 %iv
+ %0 = load double, ptr %arrayidx, align 8
+ %call = tail call i64 @llvm.llround.i64.f64(double %0)
+ %arrayidx2 = getelementptr inbounds i64, ptr %x, i32 %iv
+ store i64 %call, ptr %arrayidx2, align 8
+ %iv.next = add i32 %iv, 1
+ %exitcond = icmp eq i32 %iv.next, %n
+ br i1 %exitcond, label %for.end, label %for.body
+
+for.end: ; preds = %for.body, %entry
+ ret void
+}
+
+declare i64 @llvm.llround.i64.f64(double)
+
define void @fma_f32(i32 %n, ptr %y, ptr %x, ptr %z, ptr %w) {
; CHECK-LABEL: @fma_f32(
; CHECK: llvm.fma.v4f32
diff --git a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
index a0068f0..d6acba5 100644
--- a/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
+++ b/llvm/test/Transforms/LoopVectorize/iv-select-cmp-decreasing.ll
@@ -473,8 +473,8 @@ define i16 @select_decreasing_induction_icmp_table_i16(i16 noundef %val) {
; IC4VF4-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; IC4VF4-NEXT: br label %[[LOOP:.*]]
; IC4VF4: [[LOOP]]:
-; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
+; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load i16, ptr [[GEP_TABLE_IV]], align 1
; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = icmp ugt i16 [[LD_TABLE]], [[VAL]]
@@ -844,8 +844,8 @@ define i16 @select_decreasing_induction_icmp_table_half(half noundef %val) {
; IC4VF4-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; IC4VF4-NEXT: br label %[[LOOP:.*]]
; IC4VF4: [[LOOP]]:
-; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
+; IC4VF4-NEXT: [[IV:%.*]] = phi i16 [ 12, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; IC4VF4-NEXT: [[RDX:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[SPEC_SELECT:%.*]], %[[LOOP]] ]
; IC4VF4-NEXT: [[GEP_TABLE_IV:%.*]] = getelementptr inbounds [13 x i16], ptr @table, i16 0, i16 [[IV]]
; IC4VF4-NEXT: [[LD_TABLE:%.*]] = load half, ptr [[GEP_TABLE_IV]], align 1
; IC4VF4-NEXT: [[CMP_TABLE_VAL:%.*]] = fcmp ugt half [[LD_TABLE]], [[VAL]]
diff --git a/llvm/test/Transforms/LoopVectorize/loop-form.ll b/llvm/test/Transforms/LoopVectorize/loop-form.ll
index 10b2e70..22ebf92 100644
--- a/llvm/test/Transforms/LoopVectorize/loop-form.ll
+++ b/llvm/test/Transforms/LoopVectorize/loop-form.ll
@@ -84,7 +84,7 @@ define void @bottom_tested(ptr %p, i32 %n) {
; TAILFOLD-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; TAILFOLD-NEXT: br label [[FOR_COND:%.*]]
; TAILFOLD: for.cond:
-; TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
+; TAILFOLD-NEXT: [[I:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_COND]] ]
; TAILFOLD-NEXT: [[IPROM:%.*]] = sext i32 [[I]] to i64
; TAILFOLD-NEXT: [[B:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IPROM]]
; TAILFOLD-NEXT: store i16 0, ptr [[B]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
index c9066f2..72bc181 100644
--- a/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
+++ b/llvm/test/Transforms/LoopVectorize/memdep-fold-tail.ll
@@ -74,7 +74,7 @@ define void @maxvf3() {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[J:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[J:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[J_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[AJ:%.*]] = getelementptr inbounds [18 x i8], ptr @a, i32 0, i32 [[J]]
; CHECK-NEXT: store i8 69, ptr [[AJ]], align 8
; CHECK-NEXT: [[JP3:%.*]] = add nuw nsw i32 3, [[J]]
diff --git a/llvm/test/Transforms/LoopVectorize/optsize.ll b/llvm/test/Transforms/LoopVectorize/optsize.ll
index f0d026b..b9ee09e 100644
--- a/llvm/test/Transforms/LoopVectorize/optsize.ll
+++ b/llvm/test/Transforms/LoopVectorize/optsize.ll
@@ -626,6 +626,7 @@ define i32 @pr45526_pgso() !prof !14 {
; NPGSO-NEXT: br i1 [[TMP1]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; NPGSO: [[MIDDLE_BLOCK]]:
; NPGSO-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
+; NPGSO-NEXT: br label %[[SCALAR_PH]]
; NPGSO: [[SCALAR_PH]]:
; NPGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 508, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; NPGSO-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 5, %[[ENTRY]] ]
@@ -698,7 +699,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
; CHECK-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
; CHECK-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
; CHECK-NEXT: store i16 42, ptr [[GEPOFB]], align 4
@@ -747,7 +748,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; PGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; PGSO-NEXT: br label %[[FOR_BODY:.*]]
; PGSO: [[FOR_BODY]]:
-; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; PGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
; PGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
; PGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
; PGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4
@@ -796,7 +797,7 @@ define void @stride1(ptr noalias %B, i32 %BStride) optsize {
; NPGSO-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; NPGSO-NEXT: br label %[[FOR_BODY:.*]]
; NPGSO: [[FOR_BODY]]:
-; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NPGSO-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ], [ 0, %[[SCALAR_PH]] ]
; NPGSO-NEXT: [[MULB:%.*]] = mul nsw i32 [[IV]], [[BSTRIDE]]
; NPGSO-NEXT: [[GEPOFB:%.*]] = getelementptr inbounds i16, ptr [[B]], i32 [[MULB]]
; NPGSO-NEXT: store i16 42, ptr [[GEPOFB]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
index 69931a0..d2c53f4 100644
--- a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll
@@ -231,7 +231,6 @@ define void @non_constant_vector_expansion(i32 %0, ptr %call) {
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ null, [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], 4
; STRIDED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP1]], i64 0
; STRIDED-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]]
@@ -240,6 +239,7 @@ define void @non_constant_vector_expansion(i32 %0, ptr %call) {
; STRIDED-NEXT: [[TMP6:%.*]] = getelementptr ptr, ptr [[CALL:%.*]], i32 [[OFFSET_IDX]]
; STRIDED-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP6]], align 4
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[TMP1]], 4
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; STRIDED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
index c044cc0..bda91ba 100644
--- a/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr45679-fold-tail-by-masking.ll
@@ -62,7 +62,7 @@ define void @pr45679(ptr %A) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
; CHECK-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
; CHECK-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
@@ -124,7 +124,7 @@ define void @pr45679(ptr %A) {
; VF2UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; VF2UF2-NEXT: br label [[LOOP:%.*]]
; VF2UF2: loop:
-; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
+; VF2UF2-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
; VF2UF2-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
; VF2UF2-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
@@ -181,7 +181,7 @@ define void @pr45679(ptr %A) {
; VF1UF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ]
; VF1UF4-NEXT: br label [[LOOP:%.*]]
; VF1UF4: loop:
-; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
+; VF1UF4-NEXT: [[RIV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RIVPLUS1:%.*]], [[LOOP]] ]
; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[RIV]]
; VF1UF4-NEXT: store i32 13, ptr [[ARRAYIDX]], align 1
; VF1UF4-NEXT: [[RIVPLUS1]] = add nuw nsw i32 [[RIV]], 1
@@ -261,7 +261,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: store i64 [[V]], ptr [[B]], align 8
@@ -328,7 +328,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; VF2UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; VF2UF2-NEXT: br label [[FOR_BODY:%.*]]
; VF2UF2: for.body:
-; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; VF2UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; VF2UF2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; VF2UF2-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; VF2UF2-NEXT: store i64 [[V]], ptr [[B]], align 8
@@ -390,7 +390,7 @@ define void @load_variant(ptr noalias %a, ptr noalias %b) {
; VF1UF4-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; VF1UF4-NEXT: br label [[FOR_BODY:%.*]]
; VF1UF4: for.body:
-; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; VF1UF4-NEXT: [[IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; VF1UF4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; VF1UF4-NEXT: [[V:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; VF1UF4-NEXT: store i64 [[V]], ptr [[B]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
index d4a6aed..7d6667c 100644
--- a/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr46525-expander-insertpoint.ll
@@ -36,7 +36,7 @@ define void @test(i16 %x, i64 %y, ptr %ptr) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: store i32 0, ptr [[PTR]], align 4
; CHECK-NEXT: [[V2:%.*]] = trunc i64 [[IV]] to i8
; CHECK-NEXT: [[V3:%.*]] = add i8 [[V2]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
index 77794dc..19c9ccc 100644
--- a/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr51614-fold-tail-by-masking.ll
@@ -67,8 +67,8 @@ define dso_local i16 @reverse_interleave_load_fold_mask() optsize {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i16 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 41, [[SCALAR_PH]] ], [ [[IVMINUS1:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi i16 [ 0, [[SCALAR_PH]] ], [ [[PREVSUM:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IVMINUS1]] = add nsw i16 [[IV]], -1
; CHECK-NEXT: [[GEPA0:%.*]] = getelementptr inbounds [40 x [4 x i16]], ptr @A, i16 0, i16 [[IVMINUS1]], i16 0
; CHECK-NEXT: [[TMP29:%.*]] = load i16, ptr [[GEPA0]], align 1
diff --git a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
index ffe118b..90caee3 100644
--- a/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
+++ b/llvm/test/Transforms/LoopVectorize/predicatedinst-loop-invariant.ll
@@ -63,7 +63,7 @@ define void @loop_invariant_store(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[ADD]] = add i32 [[IV]], 1
; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
@@ -181,7 +181,7 @@ define void @loop_invariant_srem(ptr %p, i64 %a, i8 %b) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i8 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], 1
; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i8 [[IV]], 2
; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[A]], 48
@@ -253,7 +253,7 @@ define void @loop_invariant_float_store(ptr %p, i32 %a) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[CMP_SLT:%.*]] = icmp slt i32 [[IV]], 2
; CHECK-NEXT: br i1 [[CMP_SLT]], label %[[COND_FALSE:.*]], label %[[LOOP_LATCH]]
@@ -324,7 +324,7 @@ define void @test_store_to_invariant_address_needs_mask_due_to_low_trip_count(pt
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: br i1 true, label %[[LOOP_LATCH]], label %[[ELSE:.*]]
; CHECK: [[ELSE]]:
; CHECK-NEXT: br label %[[LOOP_LATCH]]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
index 8e272de..a3a4c29 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-predication.ll
@@ -34,7 +34,7 @@ define void @foo(i32 %val, ptr dereferenceable(1024) %ptr) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NEXT:%.*]], [[WHILE_BODY]] ], [ 0, [[SCALAR_PH]] ]
; CHECK-NEXT: [[GEP:%.*]] = getelementptr i32, ptr [[PTR:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[LD1:%.*]] = load i32, ptr [[GEP]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nsw i64 [[INDEX]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
index b2acc64..77f2fc5 100644
--- a/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
+++ b/llvm/test/Transforms/LoopVectorize/scev-predicate-reasoning.ll
@@ -96,17 +96,17 @@ define void @integer_induction_wraps_scev_predicate_known(i32 %x, ptr %call, ptr
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP0]], 4
-; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
-; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[DOTSPLAT]]
-; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP4]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP0]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = mul <4 x i64> <i64 0, i64 1, i64 2, i64 3>, [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i64> [[TMP3]]
; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[INDEX]] to i32
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i32 30, [[DOTCAST]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[OFFSET_IDX]]
-; CHECK-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP5]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr ptr, ptr [[CALL]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: store <4 x ptr> [[VECTOR_GEP]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992
; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/select-reduction.ll b/llvm/test/Transforms/LoopVectorize/select-reduction.ll
index cfc9bb2..03b3ff2 100644
--- a/llvm/test/Transforms/LoopVectorize/select-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/select-reduction.ll
@@ -42,8 +42,8 @@ define i32 @test(i64 %N, i32 %x) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[EXTRA_ITER]], [[LOOP_PREHEADER]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[NEXT:%.*]] = phi i32 [ [[SEL:%.*]], [[LOOP]] ], [ 0, [[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[EXTRA_ITER]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[SEL_COND:%.*]] = icmp sgt i32 [[NEXT]], 10
; CHECK-NEXT: [[SEL]] = select i1 [[SEL_COND]], i32 [[NEXT]], i32 10
; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], -1
@@ -98,8 +98,8 @@ define i32 @pr66895_tail_fold_reduction_exit_inst_gets_simplified(i32 %n) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 12, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], -1
; CHECK-NEXT: [[RED_NEXT]] = mul i32 [[RED]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
index bf86cbd..6052224 100644
--- a/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/store-reduction-results-in-tail-folded-loop.ll
@@ -47,8 +47,8 @@ define void @pr75298_store_reduction_value_in_folded_loop(i64 %iv.start) optsize
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[PH]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_START]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[L:%.*]] = load i32, ptr @c, align 4
; CHECK-NEXT: [[RED_NEXT]] = xor i32 [[RED]], [[L]]
; CHECK-NEXT: store i32 [[RED_NEXT]], ptr @a, align 4
diff --git a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
index eefa3da..e7b243e 100644
--- a/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
+++ b/llvm/test/Transforms/LoopVectorize/strict-fadd-interleave-only.ll
@@ -29,8 +29,8 @@ define float @pr70988() {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021
@@ -64,8 +64,8 @@ define float @pr70988() {
; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
; CHECK-ALM-NEXT: [[RDX_NEXT]] = fadd contract float [[RDX]], 1.000000e+00
; CHECK-ALM-NEXT: [[INDEX_NEXT]] = add nuw nsw i32 [[INDEX]], 1
; CHECK-ALM-NEXT: [[COND:%.*]] = icmp ult i32 [[INDEX_NEXT]], 1021
@@ -133,8 +133,8 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1
; CHECK-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4
@@ -185,8 +185,8 @@ define float @pr72720reduction_using_active_lane_mask(ptr %src) {
; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[NARROW:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[RDX:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[RDX_NEXT:%.*]], [[LOOP]] ]
; CHECK-ALM-NEXT: [[NARROW]] = add nuw nsw i32 [[IV]], 1
; CHECK-ALM-NEXT: [[GEP:%.*]] = getelementptr float, ptr [[SRC]], i32 [[IV]]
; CHECK-ALM-NEXT: [[L:%.*]] = load float, ptr [[GEP]], align 4
@@ -243,8 +243,8 @@ define float @fadd_reduction_with_live_in(float %inc) {
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]]
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000
@@ -279,8 +279,8 @@ define float @fadd_reduction_with_live_in(float %inc) {
; CHECK-ALM-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ]
; CHECK-ALM-NEXT: br label [[LOOP:%.*]]
; CHECK-ALM: loop:
-; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[IV:%.*]] = phi i32 [ 0, [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-ALM-NEXT: [[SUM:%.*]] = phi float [ 0.000000e+00, [[SCALAR_PH]] ], [ [[SUM_NEXT:%.*]], [[LOOP]] ]
; CHECK-ALM-NEXT: [[SUM_NEXT]] = fadd float [[SUM]], [[INC]]
; CHECK-ALM-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-ALM-NEXT: [[EC:%.*]] = icmp eq i32 [[IV]], 1000
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
index 3cf8b3f..9f33db8 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-alloca-in-loop.ll
@@ -58,7 +58,7 @@ define i32 @test(ptr %vf1, i64 %n) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_BODY]] ]
; CHECK-NEXT: [[TMP18:%.*]] = alloca i8, i64 [[N]], align 16
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds ptr, ptr [[VF1]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store ptr [[TMP18]], ptr [[ARRAYIDX]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
index efc2b8d..ac15787 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-optimize-vector-induction-width.ll
@@ -38,7 +38,7 @@ define void @canonical_small_tc_i8(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -99,7 +99,7 @@ define void @canonical_upper_limit_i8(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -160,7 +160,7 @@ define void @canonical_lower_limit_i16(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -221,7 +221,7 @@ define void @canonical_upper_limit_i16(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -282,7 +282,7 @@ define void @canonical_lower_limit_i32(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -343,7 +343,7 @@ define void @canonical_upper_limit_i32(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -404,7 +404,7 @@ define void @canonical_lower_limit_i64(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -465,7 +465,7 @@ define void @canonical_upper_limit_i64(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i64 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -526,7 +526,7 @@ define void @canonical_lower_limit_i128(ptr nocapture noundef writeonly %p) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i256 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i256 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i256 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[P_IV:%.*]] = getelementptr inbounds i16, ptr [[P]], i256 [[IV]]
; CHECK-NEXT: store i16 1, ptr [[P_IV]], align 2
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i256 [[IV]], 1
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
index 222c1ee..6f4bb1d 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
@@ -59,7 +59,7 @@ define void @tail_fold_switch(ptr %dst, i32 %0) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: switch i32 [[TMP0]], label %[[LOOP_LATCH]] [
; CHECK-NEXT: i32 0, label %[[LOOP_LATCH]]
; CHECK-NEXT: i32 1, label %[[IF_THEN:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
index 13d5be1..d39a146 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-vectorization-factor-1.ll
@@ -60,7 +60,7 @@ define void @VF1-VPlanExe(ptr %dst) {
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store i32 0, ptr [[DST_PTR]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
@@ -140,7 +140,7 @@ define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
-; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[PTR1]], [[SCALAR_PH]] ]
; CHECK-NEXT: store double 0.000000e+00, ptr [[ADDR]], align 8
; CHECK-NEXT: [[PTR]] = getelementptr inbounds double, ptr [[ADDR]], i64 1
; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[PTR]], [[PTR2]]
diff --git a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
index 85cf925..a35e763 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
@@ -302,7 +302,7 @@ define void @redundant_branch_and_blends_without_mask(ptr %A) {
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ]
; CHECK-NEXT: [[GEP_IV:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_IV]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[L]], 10
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
index 59c76ae..983f327 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
@@ -224,7 +224,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ]
; VF8UF1-NEXT: br label %[[LOOP:.*]]
; VF8UF1: [[LOOP]]:
-; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
; VF8UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2
; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
@@ -368,7 +368,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ]
; VF8UF2-NEXT: br label %[[LOOP:.*]]
; VF8UF2: [[LOOP]]:
-; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
; VF8UF2-NEXT: store i16 0, ptr [[GEP_DST]], align 2
; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
@@ -511,7 +511,7 @@ define void @remove_loop_region_with_replicate_recipe(ptr %dst, i64 range(i64 5,
; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 2, %[[ENTRY]] ]
; VF16UF1-NEXT: br label %[[LOOP:.*]]
; VF16UF1: [[LOOP]]:
-; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 2, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i16, ptr [[DST]], i64 [[IV]]
; VF16UF1-NEXT: store i16 0, ptr [[GEP_DST]], align 2
; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
@@ -797,7 +797,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; VF8UF1-NEXT: br label %[[LOOP:.*]]
; VF8UF1: [[LOOP]]:
-; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
@@ -994,7 +994,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; VF8UF2-NEXT: br label %[[LOOP:.*]]
; VF8UF2: [[LOOP]]:
-; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1
@@ -1190,7 +1190,7 @@ define void @scev_expand_step(i64 %x, ptr %dst) {
; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[ENTRY]] ]
; VF16UF1-NEXT: br label %[[LOOP:.*]]
; VF16UF1: [[LOOP]]:
-; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll
index 4427699..9e086dca 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/commute.ll
@@ -8,15 +8,18 @@ target triple = "aarch64--linux-gnu"
define void @test1(ptr nocapture readonly %J, i32 %xmin, i32 %ymin) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> poison, i32 [[XMIN:%.*]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> [[TMP0]], i32 [[YMIN:%.*]], i32 1
; CHECK-NEXT: br label [[FOR_BODY3_LR_PH:%.*]]
; CHECK: for.body3.lr.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = sitofp <2 x i32> [[TMP1]] to <2 x float>
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x float>, ptr [[J:%.*]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = fsub fast <2 x float> [[TMP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = fmul fast <2 x float> [[TMP5]], [[TMP5]]
-; CHECK-NEXT: [[ADD:%.*]] = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> [[TMP6]])
+; CHECK-NEXT: [[CONV5:%.*]] = sitofp i32 [[YMIN:%.*]] to float
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[XMIN:%.*]] to float
+; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[J:%.*]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fsub fast float [[CONV]], [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [[STRUCTA:%.*]], ptr [[J]], i64 0, i32 0, i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4
+; CHECK-NEXT: [[SUB10:%.*]] = fsub fast float [[CONV5]], [[TMP1]]
+; CHECK-NEXT: [[MUL11:%.*]] = fmul fast float [[SUB]], [[SUB]]
+; CHECK-NEXT: [[MUL12:%.*]] = fmul fast float [[SUB10]], [[SUB10]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL11]], [[MUL12]]
; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[ADD]], 0.000000e+00
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY3_LR_PH]], label [[FOR_END27:%.*]]
; CHECK: for.end27:
@@ -47,15 +50,18 @@ for.end27:
define void @test2(ptr nocapture readonly %J, i32 %xmin, i32 %ymin) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i32> poison, i32 [[XMIN:%.*]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i32> [[TMP0]], i32 [[YMIN:%.*]], i32 1
; CHECK-NEXT: br label [[FOR_BODY3_LR_PH:%.*]]
; CHECK: for.body3.lr.ph:
-; CHECK-NEXT: [[TMP2:%.*]] = sitofp <2 x i32> [[TMP1]] to <2 x float>
-; CHECK-NEXT: [[TMP4:%.*]] = load <2 x float>, ptr [[J:%.*]], align 4
-; CHECK-NEXT: [[TMP5:%.*]] = fsub fast <2 x float> [[TMP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = fmul fast <2 x float> [[TMP5]], [[TMP5]]
-; CHECK-NEXT: [[ADD:%.*]] = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> [[TMP6]])
+; CHECK-NEXT: [[CONV5:%.*]] = sitofp i32 [[YMIN:%.*]] to float
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[XMIN:%.*]] to float
+; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[J:%.*]], align 4
+; CHECK-NEXT: [[SUB:%.*]] = fsub fast float [[CONV]], [[TMP0]]
+; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [[STRUCTA:%.*]], ptr [[J]], i64 0, i32 0, i64 1
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX9]], align 4
+; CHECK-NEXT: [[SUB10:%.*]] = fsub fast float [[CONV5]], [[TMP1]]
+; CHECK-NEXT: [[MUL11:%.*]] = fmul fast float [[SUB]], [[SUB]]
+; CHECK-NEXT: [[MUL12:%.*]] = fmul fast float [[SUB10]], [[SUB10]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL12]], [[MUL11]]
; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[ADD]], 0.000000e+00
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY3_LR_PH]], label [[FOR_END27:%.*]]
; CHECK: for.end27:
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll
new file mode 100644
index 0000000..301e5da
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/exp.ll
@@ -0,0 +1,279 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+define void @ldexp_f32i32(ptr %x, ptr %y, i32 %exp) {
+; CHECK-LABEL: @ldexp_f32i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP]])
+; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L4]], i32 [[EXP]])
+; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L6]], i32 [[EXP]])
+; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
+; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
+; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
+; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp)
+ %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp)
+ %l5 = tail call float @llvm.ldexp.f32.i32(float %l4, i32 %exp)
+ %l7 = tail call float @llvm.ldexp.f32.i32(float %l6, i32 %exp)
+ store float %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
+ store float %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
+ store float %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
+ store float %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f64i32(ptr %x, ptr %y, i32 %exp) {
+; CHECK-LABEL: @ldexp_f64i32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP]])
+; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L4]], i32 [[EXP]])
+; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L6]], i32 [[EXP]])
+; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
+; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
+; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
+; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp)
+ %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp)
+ %l5 = tail call double @llvm.ldexp.f64.i32(double %l4, i32 %exp)
+ %l7 = tail call double @llvm.ldexp.f64.i32(double %l6, i32 %exp)
+ store double %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
+ store double %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
+ store double %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
+ store double %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f32i64(ptr %x, ptr %y, i64 %exp) {
+; CHECK-LABEL: @ldexp_f32i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L0]], i64 [[EXP:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L2]], i64 [[EXP]])
+; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP]])
+; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP]])
+; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
+; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
+; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
+; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call float @llvm.ldexp.f32.i64(float %l0, i64 %exp)
+ %l3 = tail call float @llvm.ldexp.f32.i64(float %l2, i64 %exp)
+ %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp)
+ %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp)
+ store float %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
+ store float %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
+ store float %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
+ store float %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f64i64(ptr %x, ptr %y, i64 %exp) {
+; CHECK-LABEL: @ldexp_f64i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L0]], i64 [[EXP:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L2]], i64 [[EXP]])
+; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP]])
+; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP]])
+; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
+; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
+; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
+; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call double @llvm.ldexp.f64.i64(double %l0, i64 %exp)
+ %l3 = tail call double @llvm.ldexp.f64.i64(double %l2, i64 %exp)
+ %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp)
+ %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp)
+ store double %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
+ store double %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
+ store double %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
+ store double %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f32i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) {
+; CHECK-LABEL: @ldexp_f32i32_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L0]], i32 [[EXP32:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call float @llvm.ldexp.f32.i32(float [[L2]], i32 [[EXP32]])
+; CHECK-NEXT: [[L5:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L4]], i64 [[EXP64:%.*]])
+; CHECK-NEXT: [[L7:%.*]] = tail call float @llvm.ldexp.f32.i64(float [[L6]], i64 [[EXP64]])
+; CHECK-NEXT: store float [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 1
+; CHECK-NEXT: store float [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 2
+; CHECK-NEXT: store float [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 3
+; CHECK-NEXT: store float [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call float @llvm.ldexp.f32.i32(float %l0, i32 %exp32)
+ %l3 = tail call float @llvm.ldexp.f32.i32(float %l2, i32 %exp32)
+ %l5 = tail call float @llvm.ldexp.f32.i64(float %l4, i64 %exp64)
+ %l7 = tail call float @llvm.ldexp.f32.i64(float %l6, i64 %exp64)
+ store float %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds float, ptr %y, i64 1
+ store float %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds float, ptr %y, i64 2
+ store float %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds float, ptr %y, i64 3
+ store float %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @ldexp_f64_i32_i64(ptr %x, ptr %y, i32 %exp32, i64 %exp64) {
+; CHECK-LABEL: @ldexp_f64_i32_i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L0]], i32 [[EXP32:%.*]])
+; CHECK-NEXT: [[L3:%.*]] = tail call double @llvm.ldexp.f64.i32(double [[L2]], i32 [[EXP32]])
+; CHECK-NEXT: [[L5:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L4]], i64 [[EXP64:%.*]])
+; CHECK-NEXT: [[L7:%.*]] = tail call double @llvm.ldexp.f64.i64(double [[L6]], i64 [[EXP64]])
+; CHECK-NEXT: store double [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 1
+; CHECK-NEXT: store double [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 2
+; CHECK-NEXT: store double [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds double, ptr [[Y]], i64 3
+; CHECK-NEXT: store double [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call double @llvm.ldexp.f64.i32(double %l0, i32 %exp32)
+ %l3 = tail call double @llvm.ldexp.f64.i32(double %l2, i32 %exp32)
+ %l5 = tail call double @llvm.ldexp.f64.i64(double %l4, i64 %exp64)
+ %l7 = tail call double @llvm.ldexp.f64.i64(double %l6, i64 %exp64)
+ store double %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds double, ptr %y, i64 1
+ store double %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds double, ptr %y, i64 2
+ store double %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds double, ptr %y, i64 3
+ store double %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+declare float @llvm.ldexp.f32.i32(float, i32)
+declare double @llvm.ldexp.f64.i32(double, i32)
+declare float @llvm.ldexp.f32.i64(float, i64)
+declare double @llvm.ldexp.f64.i64(double, i64)
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll
new file mode 100644
index 0000000..07a3fe7
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/fround.ll
@@ -0,0 +1,280 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+define void @lround_i32f32(ptr %x, ptr %y, i32 %n) {
+; CHECK-LABEL: @lround_i32f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f32(float [[L6]])
+; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
+; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
+; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
+; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call i32 @llvm.lround.i32.f32(float %l0)
+ %l3 = tail call i32 @llvm.lround.i32.f32(float %l2)
+ %l5 = tail call i32 @llvm.lround.i32.f32(float %l4)
+ %l7 = tail call i32 @llvm.lround.i32.f32(float %l6)
+ store i32 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
+ store i32 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
+ store i32 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
+ store i32 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @lround_i32f64(ptr %x, ptr %y, i32 %n) {
+; CHECK-LABEL: @lround_i32f64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i32 @llvm.lround.i32.f64(double [[L6]])
+; CHECK-NEXT: store i32 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 1
+; CHECK-NEXT: store i32 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 2
+; CHECK-NEXT: store i32 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, ptr [[Y]], i64 3
+; CHECK-NEXT: store i32 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call i32 @llvm.lround.i32.f64(double %l0)
+ %l3 = tail call i32 @llvm.lround.i32.f64(double %l2)
+ %l5 = tail call i32 @llvm.lround.i32.f64(double %l4)
+ %l7 = tail call i32 @llvm.lround.i32.f64(double %l6)
+ store i32 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i32, ptr %y, i64 1
+ store i32 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i32, ptr %y, i64 2
+ store i32 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i32, ptr %y, i64 3
+ store i32 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @lround_i64f32(ptr %x, ptr %y, i64 %n) {
+; CHECK-LABEL: @lround_i64f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f32(float [[L6]])
+; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
+; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
+; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
+; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call i64 @llvm.lround.i64.f32(float %l0)
+ %l3 = tail call i64 @llvm.lround.i64.f32(float %l2)
+ %l5 = tail call i64 @llvm.lround.i64.f32(float %l4)
+ %l7 = tail call i64 @llvm.lround.i64.f32(float %l6)
+ store i64 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
+ store i64 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
+ store i64 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
+ store i64 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @lround_i64f64(ptr %x, ptr %y, i64 %n) {
+; CHECK-LABEL: @lround_i64f64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.lround.i64.f64(double [[L6]])
+; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
+; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
+; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
+; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call i64 @llvm.lround.i64.f64(double %l0)
+ %l3 = tail call i64 @llvm.lround.i64.f64(double %l2)
+ %l5 = tail call i64 @llvm.lround.i64.f64(double %l4)
+ %l7 = tail call i64 @llvm.lround.i64.f64(double %l6)
+ store i64 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
+ store i64 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
+ store i64 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
+ store i64 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @llround_i64f32(ptr %x, ptr %y, i64 %n) {
+; CHECK-LABEL: @llround_i64f32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load float, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f32(float [[L6]])
+; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
+; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
+; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
+; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load float, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds float, ptr %x, i64 1
+ %l2 = load float, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds float, ptr %x, i64 2
+ %l4 = load float, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds float, ptr %x, i64 3
+ %l6 = load float, ptr %arrayidx.3, align 4
+ %l1 = tail call i64 @llvm.llround.i64.f32(float %l0)
+ %l3 = tail call i64 @llvm.llround.i64.f32(float %l2)
+ %l5 = tail call i64 @llvm.llround.i64.f32(float %l4)
+ %l7 = tail call i64 @llvm.llround.i64.f32(float %l6)
+ store i64 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
+ store i64 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
+ store i64 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
+ store i64 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+define void @llround_i64f64(ptr %x, ptr %y, i64 %n) {
+; CHECK-LABEL: @llround_i64f64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[L0:%.*]] = load double, ptr [[X:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[X]], i64 1
+; CHECK-NEXT: [[L2:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds double, ptr [[X]], i64 2
+; CHECK-NEXT: [[L4:%.*]] = load double, ptr [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds double, ptr [[X]], i64 3
+; CHECK-NEXT: [[L6:%.*]] = load double, ptr [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[L1:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L0]])
+; CHECK-NEXT: [[L3:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L2]])
+; CHECK-NEXT: [[L5:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L4]])
+; CHECK-NEXT: [[L7:%.*]] = tail call i64 @llvm.llround.i64.f64(double [[L6]])
+; CHECK-NEXT: store i64 [[L1]], ptr [[Y:%.*]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 1
+; CHECK-NEXT: store i64 [[L3]], ptr [[ARRAYIDX2_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 2
+; CHECK-NEXT: store i64 [[L5]], ptr [[ARRAYIDX2_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i64, ptr [[Y]], i64 3
+; CHECK-NEXT: store i64 [[L7]], ptr [[ARRAYIDX2_3]], align 4
+; CHECK-NEXT: ret void
+;
+entry:
+ %l0 = load double, ptr %x, align 4
+ %arrayidx.1 = getelementptr inbounds double, ptr %x, i64 1
+ %l2 = load double, ptr %arrayidx.1, align 4
+ %arrayidx.2 = getelementptr inbounds double, ptr %x, i64 2
+ %l4 = load double, ptr %arrayidx.2, align 4
+ %arrayidx.3 = getelementptr inbounds double, ptr %x, i64 3
+ %l6 = load double, ptr %arrayidx.3, align 4
+ %l1 = tail call i64 @llvm.llround.i64.f64(double %l0)
+ %l3 = tail call i64 @llvm.llround.i64.f64(double %l2)
+ %l5 = tail call i64 @llvm.llround.i64.f64(double %l4)
+ %l7 = tail call i64 @llvm.llround.i64.f64(double %l6)
+ store i64 %l1, ptr %y, align 4
+ %arrayidx2.1 = getelementptr inbounds i64, ptr %y, i64 1
+ store i64 %l3, ptr %arrayidx2.1, align 4
+ %arrayidx2.2 = getelementptr inbounds i64, ptr %y, i64 2
+ store i64 %l5, ptr %arrayidx2.2, align 4
+ %arrayidx2.3 = getelementptr inbounds i64, ptr %y, i64 3
+ store i64 %l7, ptr %arrayidx2.3, align 4
+ ret void
+}
+
+declare i32 @llvm.lround.i32.f32(float)
+declare i64 @llvm.lround.i64.f32(float)
+declare i64 @llvm.lround.i64.f64(double)
+declare i64 @llvm.llround.i64.f32(float)
+declare i64 @llvm.llround.i64.f64(double)
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/reused-scalar-repeated-in-node.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/reused-scalar-repeated-in-node.ll
index 295a718..2e68432 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/reused-scalar-repeated-in-node.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/reused-scalar-repeated-in-node.ll
@@ -12,7 +12,8 @@ define void @test() {
; CHECK: [[BB63]]:
; CHECK-NEXT: br label %[[BB64]]
; CHECK: [[BB64]]:
-; CHECK-NEXT: [[TMP25:%.*]] = phi <16 x float> [ poison, %[[BB61]] ], [ poison, %[[BB63]] ], [ poison, %[[BB62]] ]
+; CHECK-NEXT: [[I65:%.*]] = phi nsz float [ poison, %[[BB61]] ], [ poison, %[[BB63]] ], [ poison, %[[BB62]] ]
+; CHECK-NEXT: [[I77:%.*]] = phi nsz float [ poison, %[[BB61]] ], [ poison, %[[BB63]] ], [ poison, %[[BB62]] ]
; CHECK-NEXT: [[I66:%.*]] = load float, ptr poison, align 16
; CHECK-NEXT: [[I67:%.*]] = load float, ptr poison, align 4
; CHECK-NEXT: [[I68:%.*]] = load float, ptr poison, align 8
@@ -24,57 +25,125 @@ define void @test() {
; CHECK-NEXT: [[I74:%.*]] = load float, ptr poison, align 4
; CHECK-NEXT: [[I75:%.*]] = load float, ptr poison, align 16
; CHECK-NEXT: [[I76:%.*]] = load float, ptr poison, align 4
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <16 x float> poison, float [[I76]], i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <16 x float> [[TMP1]], float [[I75]], i32 1
-; CHECK-NEXT: [[TMP3:%.*]] = insertelement <16 x float> [[TMP2]], float [[I74]], i32 2
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <16 x float> [[TMP3]], float [[I73]], i32 3
-; CHECK-NEXT: [[TMP5:%.*]] = insertelement <16 x float> [[TMP4]], float [[I71]], i32 4
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <16 x float> [[TMP5]], float [[I70]], i32 5
-; CHECK-NEXT: [[TMP7:%.*]] = insertelement <16 x float> [[TMP6]], float [[I68]], i32 6
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <16 x float> [[TMP7]], float [[I66]], i32 7
-; CHECK-NEXT: [[TMP9:%.*]] = insertelement <16 x float> [[TMP8]], float [[I72]], i32 13
-; CHECK-NEXT: [[TMP10:%.*]] = insertelement <16 x float> [[TMP9]], float [[I67]], i32 14
-; CHECK-NEXT: [[TMP11:%.*]] = insertelement <16 x float> [[TMP10]], float [[I69]], i32 15
; CHECK-NEXT: br i1 poison, label %[[BB167:.*]], label %[[BB77:.*]]
; CHECK: [[BB77]]:
-; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <16 x float> [[TMP11]], <16 x float> poison, <8 x i32> <i32 poison, i32 poison, i32 poison, i32 poison, i32 14, i32 15, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP17:%.*]] = insertelement <8 x float> poison, float [[I70]], i32 0
-; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <8 x float> [[TMP12]], <8 x float> [[TMP17]], <8 x i32> <i32 8, i32 poison, i32 poison, i32 poison, i32 4, i32 5, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP14:%.*]] = insertelement <8 x float> poison, float [[I70]], i32 1
-; CHECK-NEXT: [[TMP19:%.*]] = insertelement <8 x float> [[TMP14]], float [[I68]], i32 2
-; CHECK-NEXT: [[TMP16:%.*]] = insertelement <8 x float> [[TMP19]], float [[I66]], i32 3
-; CHECK-NEXT: [[TMP20:%.*]] = insertelement <8 x float> [[TMP16]], float [[I67]], i32 6
-; CHECK-NEXT: [[TMP21:%.*]] = insertelement <8 x float> [[TMP20]], float [[I69]], i32 7
-; CHECK-NEXT: [[TMP39:%.*]] = shufflevector <16 x float> [[TMP25]], <16 x float> poison, <16 x i32> <i32 poison, i32 poison, i32 3, i32 2, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <16 x float> [[TMP39]], <16 x float> [[TMP25]], <16 x i32> <i32 poison, i32 poison, i32 2, i32 3, i32 18, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 19, i32 poison, i32 poison>
; CHECK-NEXT: br label %[[BB78:.*]]
; CHECK: [[BB78]]:
-; CHECK-NEXT: [[TMP15:%.*]] = phi <8 x float> [ [[TMP23]], %[[BB77]] ], [ [[TMP36:%.*]], %[[BB78]] ]
-; CHECK-NEXT: [[TMP22:%.*]] = phi <8 x float> [ [[TMP21]], %[[BB77]] ], [ [[TMP31:%.*]], %[[BB78]] ]
-; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <8 x float> [[TMP22]], <8 x float> poison, <16 x i32> <i32 0, i32 3, i32 1, i32 2, i32 3, i32 0, i32 2, i32 3, i32 2, i32 6, i32 2, i32 3, i32 0, i32 7, i32 6, i32 6>
-; CHECK-NEXT: [[TMP38:%.*]] = shufflevector <8 x float> [[TMP15]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 1, i32 0, i32 3, i32 1, i32 3, i32 5, i32 3, i32 1, i32 0, i32 4, i32 5, i32 5>
-; CHECK-NEXT: [[TMP18:%.*]] = fmul fast <16 x float> [[TMP24]], [[TMP13]]
-; CHECK-NEXT: [[TMP26:%.*]] = fmul fast <16 x float> [[TMP38]], [[TMP25]]
-; CHECK-NEXT: [[TMP27:%.*]] = fadd fast <16 x float> [[TMP26]], [[TMP18]]
-; CHECK-NEXT: [[TMP28:%.*]] = fadd fast <16 x float> [[TMP27]], poison
-; CHECK-NEXT: [[TMP29:%.*]] = fadd fast <16 x float> [[TMP28]], poison
-; CHECK-NEXT: [[TMP36]] = shufflevector <16 x float> [[TMP29]], <16 x float> poison, <8 x i32> <i32 5, i32 11, i32 12, i32 10, i32 14, i32 15, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP31]] = shufflevector <16 x float> [[TMP29]], <16 x float> poison, <8 x i32> <i32 12, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 14, i32 15>
+; CHECK-NEXT: [[I85:%.*]] = phi nsz float [ [[I66]], %[[BB77]] ], [ [[I103:%.*]], %[[BB78]] ]
+; CHECK-NEXT: [[I80:%.*]] = phi nsz float [ [[I67]], %[[BB77]] ], [ [[I104:%.*]], %[[BB78]] ]
+; CHECK-NEXT: [[I81:%.*]] = phi nsz float [ [[I68]], %[[BB77]] ], [ [[I105:%.*]], %[[BB78]] ]
+; CHECK-NEXT: [[I82:%.*]] = phi nsz float [ poison, %[[BB77]] ], [ [[I106:%.*]], %[[BB78]] ]
+; CHECK-NEXT: [[I84:%.*]] = phi nsz float [ poison, %[[BB77]] ], [ [[I123:%.*]], %[[BB78]] ]
+; CHECK-NEXT: [[I127:%.*]] = phi nsz float [ [[I69]], %[[BB77]] ], [ [[I124:%.*]], %[[BB78]] ]
+; CHECK-NEXT: [[I131:%.*]] = phi nsz float [ poison, %[[BB77]] ], [ [[I125:%.*]], %[[BB78]] ]
+; CHECK-NEXT: [[I86:%.*]] = phi nsz float [ [[I70]], %[[BB77]] ], [ [[I126:%.*]], %[[BB78]] ]
+; CHECK-NEXT: [[I87:%.*]] = fmul fast float [[I85]], poison
+; CHECK-NEXT: [[I88:%.*]] = fmul fast float [[I80]], poison
+; CHECK-NEXT: [[I89:%.*]] = fmul fast float [[I81]], poison
+; CHECK-NEXT: [[I90:%.*]] = fmul fast float [[I82]], poison
+; CHECK-NEXT: [[I91:%.*]] = fmul fast float [[I84]], poison
+; CHECK-NEXT: [[I92:%.*]] = fadd fast float [[I91]], [[I87]]
+; CHECK-NEXT: [[I93:%.*]] = fmul fast float [[I127]], poison
+; CHECK-NEXT: [[I94:%.*]] = fadd fast float [[I93]], [[I88]]
+; CHECK-NEXT: [[I95:%.*]] = fmul fast float [[I131]], poison
+; CHECK-NEXT: [[I96:%.*]] = fadd fast float [[I95]], [[I89]]
+; CHECK-NEXT: [[I97:%.*]] = fmul fast float [[I86]], poison
+; CHECK-NEXT: [[I98:%.*]] = fadd fast float [[I97]], [[I90]]
+; CHECK-NEXT: [[I99:%.*]] = fadd fast float [[I92]], poison
+; CHECK-NEXT: [[I100:%.*]] = fadd fast float [[I94]], poison
+; CHECK-NEXT: [[I101:%.*]] = fadd fast float [[I96]], poison
+; CHECK-NEXT: [[I102:%.*]] = fadd fast float [[I98]], poison
+; CHECK-NEXT: [[I103]] = fadd fast float [[I99]], poison
+; CHECK-NEXT: [[I104]] = fadd fast float [[I100]], poison
+; CHECK-NEXT: [[I105]] = fadd fast float [[I101]], poison
+; CHECK-NEXT: [[I106]] = fadd fast float [[I102]], poison
+; CHECK-NEXT: [[I107:%.*]] = fmul fast float [[I85]], poison
+; CHECK-NEXT: [[I108:%.*]] = fmul fast float [[I80]], poison
+; CHECK-NEXT: [[I109:%.*]] = fmul fast float [[I81]], poison
+; CHECK-NEXT: [[I110:%.*]] = fmul fast float [[I82]], poison
+; CHECK-NEXT: [[I111:%.*]] = fmul fast float [[I84]], poison
+; CHECK-NEXT: [[I112:%.*]] = fadd fast float [[I111]], [[I107]]
+; CHECK-NEXT: [[I113:%.*]] = fmul fast float [[I127]], poison
+; CHECK-NEXT: [[I114:%.*]] = fadd fast float [[I113]], [[I108]]
+; CHECK-NEXT: [[I115:%.*]] = fmul fast float [[I131]], poison
+; CHECK-NEXT: [[I116:%.*]] = fadd fast float [[I115]], [[I109]]
+; CHECK-NEXT: [[I117:%.*]] = fmul fast float [[I86]], poison
+; CHECK-NEXT: [[I118:%.*]] = fadd fast float [[I117]], [[I110]]
+; CHECK-NEXT: [[I119:%.*]] = fadd fast float [[I112]], poison
+; CHECK-NEXT: [[I120:%.*]] = fadd fast float [[I114]], poison
+; CHECK-NEXT: [[I121:%.*]] = fadd fast float [[I116]], poison
+; CHECK-NEXT: [[I122:%.*]] = fadd fast float [[I118]], poison
+; CHECK-NEXT: [[I123]] = fadd fast float [[I119]], poison
+; CHECK-NEXT: [[I124]] = fadd fast float [[I120]], poison
+; CHECK-NEXT: [[I125]] = fadd fast float [[I121]], poison
+; CHECK-NEXT: [[I126]] = fadd fast float [[I122]], poison
+; CHECK-NEXT: [[I135:%.*]] = fmul fast float [[I85]], [[I65]]
+; CHECK-NEXT: [[I128:%.*]] = fmul fast float [[I80]], [[I65]]
+; CHECK-NEXT: [[I129:%.*]] = fmul fast float [[I81]], [[I65]]
+; CHECK-NEXT: [[I130:%.*]] = fmul fast float [[I82]], [[I65]]
+; CHECK-NEXT: [[I133:%.*]] = fmul fast float [[I84]], [[I77]]
+; CHECK-NEXT: [[I134:%.*]] = fadd fast float [[I133]], [[I135]]
+; CHECK-NEXT: [[I136:%.*]] = fmul fast float [[I127]], [[I77]]
+; CHECK-NEXT: [[TMP51:%.*]] = fadd fast float [[I136]], [[I128]]
+; CHECK-NEXT: [[I138:%.*]] = fmul fast float [[I131]], [[I77]]
+; CHECK-NEXT: [[TMP52:%.*]] = fadd fast float [[I138]], [[I129]]
+; CHECK-NEXT: [[I137:%.*]] = fmul fast float [[I86]], [[I77]]
+; CHECK-NEXT: [[I139:%.*]] = fadd fast float [[I137]], [[I130]]
+; CHECK-NEXT: [[I140:%.*]] = fadd fast float [[I134]], poison
+; CHECK-NEXT: [[I141:%.*]] = fadd fast float [[TMP51]], poison
+; CHECK-NEXT: [[I142:%.*]] = fadd fast float [[TMP52]], poison
+; CHECK-NEXT: [[I143:%.*]] = fadd fast float [[I139]], poison
+; CHECK-NEXT: [[I144:%.*]] = fadd fast float [[I140]], poison
+; CHECK-NEXT: [[I145:%.*]] = fadd fast float [[I141]], poison
+; CHECK-NEXT: [[I146:%.*]] = fadd fast float [[I142]], poison
+; CHECK-NEXT: [[I152:%.*]] = fadd fast float [[I143]], poison
+; CHECK-NEXT: [[I147:%.*]] = fmul fast float [[I85]], poison
+; CHECK-NEXT: [[I148:%.*]] = fmul fast float [[I80]], poison
+; CHECK-NEXT: [[I149:%.*]] = fmul fast float [[I81]], poison
+; CHECK-NEXT: [[I150:%.*]] = fmul fast float [[I82]], poison
+; CHECK-NEXT: [[I151:%.*]] = fmul fast float [[I84]], poison
+; CHECK-NEXT: [[TMP57:%.*]] = fadd fast float [[I151]], [[I147]]
+; CHECK-NEXT: [[I153:%.*]] = fmul fast float [[I127]], poison
+; CHECK-NEXT: [[TMP58:%.*]] = fadd fast float [[I153]], [[I148]]
+; CHECK-NEXT: [[I155:%.*]] = fmul fast float [[I131]], poison
+; CHECK-NEXT: [[TMP59:%.*]] = fadd fast float [[I155]], [[I149]]
+; CHECK-NEXT: [[I157:%.*]] = fmul fast float [[I86]], poison
+; CHECK-NEXT: [[TMP60:%.*]] = fadd fast float [[I157]], [[I150]]
+; CHECK-NEXT: [[I159:%.*]] = fadd fast float [[TMP57]], poison
+; CHECK-NEXT: [[I160:%.*]] = fadd fast float [[TMP58]], poison
+; CHECK-NEXT: [[I161:%.*]] = fadd fast float [[TMP59]], poison
+; CHECK-NEXT: [[I162:%.*]] = fadd fast float [[TMP60]], poison
+; CHECK-NEXT: [[I163:%.*]] = fadd fast float [[I159]], poison
+; CHECK-NEXT: [[I164:%.*]] = fadd fast float [[I160]], poison
+; CHECK-NEXT: [[I165:%.*]] = fadd fast float [[I161]], poison
+; CHECK-NEXT: [[I166:%.*]] = fadd fast float [[I162]], poison
; CHECK-NEXT: br i1 poison, label %[[BB78]], label %[[BB167]]
; CHECK: [[BB167]]:
-; CHECK-NEXT: [[TMP32:%.*]] = phi <16 x float> [ [[TMP11]], %[[BB64]] ], [ [[TMP29]], %[[BB78]] ]
-; CHECK-NEXT: [[TMP33:%.*]] = extractelement <16 x float> [[TMP32]], i32 14
+; CHECK-NEXT: [[I168:%.*]] = phi nsz float [ [[I76]], %[[BB64]] ], [ [[I166]], %[[BB78]] ]
+; CHECK-NEXT: [[I169:%.*]] = phi nsz float [ poison, %[[BB64]] ], [ [[I165]], %[[BB78]] ]
+; CHECK-NEXT: [[I170:%.*]] = phi nsz float [ poison, %[[BB64]] ], [ [[I164]], %[[BB78]] ]
+; CHECK-NEXT: [[I171:%.*]] = phi nsz float [ [[I75]], %[[BB64]] ], [ [[I163]], %[[BB78]] ]
+; CHECK-NEXT: [[I172:%.*]] = phi nsz float [ [[I74]], %[[BB64]] ], [ [[I152]], %[[BB78]] ]
+; CHECK-NEXT: [[I173:%.*]] = phi nsz float [ [[I73]], %[[BB64]] ], [ [[I146]], %[[BB78]] ]
+; CHECK-NEXT: [[TMP34:%.*]] = phi nsz float [ [[I72]], %[[BB64]] ], [ [[I145]], %[[BB78]] ]
+; CHECK-NEXT: [[I175:%.*]] = phi nsz float [ [[I71]], %[[BB64]] ], [ [[I144]], %[[BB78]] ]
+; CHECK-NEXT: [[I176:%.*]] = phi nsz float [ [[I70]], %[[BB64]] ], [ [[I126]], %[[BB78]] ]
+; CHECK-NEXT: [[I177:%.*]] = phi nsz float [ poison, %[[BB64]] ], [ [[I125]], %[[BB78]] ]
+; CHECK-NEXT: [[I178:%.*]] = phi nsz float [ [[I69]], %[[BB64]] ], [ [[I124]], %[[BB78]] ]
+; CHECK-NEXT: [[I179:%.*]] = phi nsz float [ poison, %[[BB64]] ], [ [[I123]], %[[BB78]] ]
+; CHECK-NEXT: [[I180:%.*]] = phi nsz float [ poison, %[[BB64]] ], [ [[I106]], %[[BB78]] ]
+; CHECK-NEXT: [[I181:%.*]] = phi nsz float [ [[I68]], %[[BB64]] ], [ [[I105]], %[[BB78]] ]
+; CHECK-NEXT: [[TMP33:%.*]] = phi nsz float [ [[I67]], %[[BB64]] ], [ [[I104]], %[[BB78]] ]
+; CHECK-NEXT: [[I183:%.*]] = phi nsz float [ [[I66]], %[[BB64]] ], [ [[I103]], %[[BB78]] ]
; CHECK-NEXT: store float [[TMP33]], ptr poison, align 1
-; CHECK-NEXT: [[TMP34:%.*]] = extractelement <16 x float> [[TMP32]], i32 13
; CHECK-NEXT: store float [[TMP34]], ptr poison, align 1
-; CHECK-NEXT: [[TMP35:%.*]] = extractelement <16 x float> [[TMP32]], i32 15
; CHECK-NEXT: br i1 poison, label %[[BB186:.*]], label %[[BB184:.*]]
; CHECK: [[BB184]]:
; CHECK-NEXT: br label %[[BB185:.*]]
; CHECK: [[BB185]]:
; CHECK-NEXT: br i1 poison, label %[[BB185]], label %[[BB186]]
; CHECK: [[BB186]]:
-; CHECK-NEXT: [[I187:%.*]] = phi nsz float [ [[TMP35]], %[[BB167]] ], [ poison, %[[BB185]] ]
+; CHECK-NEXT: [[I187:%.*]] = phi nsz float [ [[I178]], %[[BB167]] ], [ poison, %[[BB185]] ]
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
index 64bdcf2..8093285 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/scalarization-overhead.ll
@@ -8,35 +8,56 @@
define fastcc i64 @zot(float %arg, float %arg1, float %arg2, float %arg3, float %arg4, ptr %arg5, i1 %arg6, i1 %arg7, i1 %arg8) {
; CHECK-LABEL: @zot(
; CHECK-NEXT: bb:
+; CHECK-NEXT: [[VAL:%.*]] = fmul fast float 0.000000e+00, 0.000000e+00
; CHECK-NEXT: [[VAL9:%.*]] = fmul fast float 0.000000e+00, [[ARG:%.*]]
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> <float 0.000000e+00, float poison, float poison, float poison>, float [[ARG]], i32 1
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x float> [[TMP0]], float [[ARG3:%.*]], i32 2
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
-; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> <float 0.000000e+00, float 0.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = insertelement <2 x float> <float poison, float 0.000000e+00>, float [[ARG3]], i32 0
-; CHECK-NEXT: [[TMP5:%.*]] = fadd fast <2 x float> [[TMP4]], <float 1.000000e+00, float 0.000000e+00>
-; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x float> [[TMP5]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP9]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
-; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <4 x float> [[TMP6]], <float 2.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+; CHECK-NEXT: [[VAL10:%.*]] = fmul fast float [[ARG3:%.*]], 1.000000e+00
+; CHECK-NEXT: [[VAL11:%.*]] = fmul fast float [[ARG3]], 1.000000e+00
+; CHECK-NEXT: [[VAL12:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
+; CHECK-NEXT: [[VAL13:%.*]] = fadd fast float [[VAL12]], 2.000000e+00
+; CHECK-NEXT: [[VAL14:%.*]] = fadd fast float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT: [[VAL15:%.*]] = fadd fast float [[VAL14]], 1.000000e+00
+; CHECK-NEXT: [[VAL16:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
+; CHECK-NEXT: [[VAL17:%.*]] = fadd fast float [[ARG3]], 1.000000e+00
; CHECK-NEXT: br i1 [[ARG6:%.*]], label [[BB18:%.*]], label [[BB57:%.*]]
; CHECK: bb18:
-; CHECK-NEXT: [[TMP8:%.*]] = phi <4 x float> [ [[TMP7]], [[BB:%.*]] ]
-; CHECK-NEXT: [[VAL16:%.*]] = extractelement <4 x float> [[TMP7]], i32 2
+; CHECK-NEXT: [[VAL19:%.*]] = phi float [ [[VAL13]], [[BB:%.*]] ]
+; CHECK-NEXT: [[VAL20:%.*]] = phi float [ [[VAL15]], [[BB]] ]
+; CHECK-NEXT: [[VAL21:%.*]] = phi float [ [[VAL16]], [[BB]] ]
+; CHECK-NEXT: [[VAL22:%.*]] = phi float [ [[VAL17]], [[BB]] ]
; CHECK-NEXT: [[VAL23:%.*]] = fmul fast float [[VAL16]], 2.000000e+00
-; CHECK-NEXT: [[VAL17:%.*]] = extractelement <4 x float> [[TMP7]], i32 3
; CHECK-NEXT: [[VAL24:%.*]] = fmul fast float [[VAL17]], 3.000000e+00
; CHECK-NEXT: br i1 [[ARG7:%.*]], label [[BB25:%.*]], label [[BB57]]
; CHECK: bb25:
-; CHECK-NEXT: [[TMP11:%.*]] = phi <4 x float> [ [[TMP8]], [[BB18]] ]
+; CHECK-NEXT: [[VAL26:%.*]] = phi float [ [[VAL19]], [[BB18]] ]
+; CHECK-NEXT: [[VAL27:%.*]] = phi float [ [[VAL20]], [[BB18]] ]
+; CHECK-NEXT: [[VAL28:%.*]] = phi float [ [[VAL21]], [[BB18]] ]
+; CHECK-NEXT: [[VAL29:%.*]] = phi float [ [[VAL22]], [[BB18]] ]
; CHECK-NEXT: br label [[BB30:%.*]]
; CHECK: bb30:
; CHECK-NEXT: [[VAL31:%.*]] = phi float [ [[VAL55:%.*]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
; CHECK-NEXT: [[VAL32:%.*]] = phi float [ [[VAL9]], [[BB30]] ], [ 0.000000e+00, [[BB25]] ]
-; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i8>, ptr [[ARG5:%.*]], align 1
-; CHECK-NEXT: [[TMP13:%.*]] = uitofp <4 x i8> [[TMP12]] to <4 x float>
-; CHECK-NEXT: [[TMP14:%.*]] = fsub fast <4 x float> [[TMP13]], [[TMP3]]
-; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <4 x float> [[TMP14]], [[TMP11]]
-; CHECK-NEXT: [[VAL54:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP15]])
+; CHECK-NEXT: [[VAL33:%.*]] = load i8, ptr [[ARG5:%.*]], align 1
+; CHECK-NEXT: [[VAL34:%.*]] = uitofp i8 [[VAL33]] to float
+; CHECK-NEXT: [[VAL35:%.*]] = getelementptr inbounds i8, ptr [[ARG5]], i64 1
+; CHECK-NEXT: [[VAL36:%.*]] = load i8, ptr [[VAL35]], align 1
+; CHECK-NEXT: [[VAL37:%.*]] = uitofp i8 [[VAL36]] to float
+; CHECK-NEXT: [[VAL38:%.*]] = getelementptr inbounds i8, ptr [[ARG5]], i64 2
+; CHECK-NEXT: [[VAL39:%.*]] = load i8, ptr [[VAL38]], align 1
+; CHECK-NEXT: [[VAL40:%.*]] = uitofp i8 [[VAL39]] to float
+; CHECK-NEXT: [[VAL41:%.*]] = getelementptr inbounds i8, ptr [[ARG5]], i64 3
+; CHECK-NEXT: [[VAL42:%.*]] = load i8, ptr [[VAL41]], align 1
+; CHECK-NEXT: [[VAL43:%.*]] = uitofp i8 [[VAL42]] to float
+; CHECK-NEXT: [[VAL44:%.*]] = fsub fast float [[VAL34]], [[VAL]]
+; CHECK-NEXT: [[VAL45:%.*]] = fsub fast float [[VAL37]], [[VAL9]]
+; CHECK-NEXT: [[VAL46:%.*]] = fsub fast float [[VAL40]], [[VAL10]]
+; CHECK-NEXT: [[VAL47:%.*]] = fsub fast float [[VAL43]], [[VAL11]]
+; CHECK-NEXT: [[VAL48:%.*]] = fmul fast float [[VAL44]], [[VAL26]]
+; CHECK-NEXT: [[VAL49:%.*]] = fmul fast float [[VAL45]], [[VAL27]]
+; CHECK-NEXT: [[VAL50:%.*]] = fadd fast float [[VAL49]], [[VAL48]]
+; CHECK-NEXT: [[VAL51:%.*]] = fmul fast float [[VAL46]], [[VAL28]]
+; CHECK-NEXT: [[VAL52:%.*]] = fadd fast float [[VAL50]], [[VAL51]]
+; CHECK-NEXT: [[VAL53:%.*]] = fmul fast float [[VAL47]], [[VAL29]]
+; CHECK-NEXT: [[VAL54:%.*]] = fadd fast float [[VAL52]], [[VAL53]]
; CHECK-NEXT: [[VAL55]] = tail call fast float @llvm.minnum.f32(float [[VAL31]], float [[ARG1:%.*]])
; CHECK-NEXT: [[VAL56:%.*]] = tail call fast float @llvm.maxnum.f32(float [[ARG2:%.*]], float [[VAL54]])
; CHECK-NEXT: call void @ham(float [[VAL55]], float [[VAL56]])
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
new file mode 100644
index 0000000..645dbc4
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/basic-strided-loads.ll
@@ -0,0 +1,741 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+
+; RUN: opt -mtriple=riscv64 -mattr=+m,+v -passes=slp-vectorizer -S < %s | FileCheck %s
+
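+; 16 consecutive i8 loads followed by 16 consecutive i8 stores should be
+; vectorized into a single <16 x i8> load and a single <16 x i8> store.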
+define void @const_stride_1_no_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_1_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
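+; Same as above, except the first two elements are swapped between the loads
+; and the stores, so a shufflevector is expected between the wide load and the
+; wide store.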
+define void @const_stride_1_with_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_1_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <16 x i8> [[TMP1]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ ; NOTE: value from %load1 is stored in %gep_s0
+ store i8 %load1, ptr %gep_s0, align 16
+ store i8 %load0, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+
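+; With a constant stride of 2, the loads are vectorized as a masked load of
+; the 31 touched bytes plus a shuffle extracting every other element.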
+define void @const_stride_2_no_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_2_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP2:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison)
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <31 x i8> [[TMP2]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 18
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 20
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 22
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 26
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
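+; Same as above, except the first two elements are swapped; the reordering is
+; folded into the shuffle mask applied to the masked load.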
+define void @const_stride_2_with_reordering(ptr %pl, ptr %ps) {
+; CHECK-LABEL: define void @const_stride_2_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = call <31 x i8> @llvm.masked.load.v31i8.p0(ptr [[GEP_L0]], i32 16, <31 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true>, <31 x i8> poison)
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <31 x i8> [[TMP1]], <31 x i8> poison, <16 x i32> <i32 2, i32 0, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 4
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 6
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 12
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 14
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 18
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 20
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 22
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 26
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 28
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 30
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load1, ptr %gep_s0, align 16
+ store i8 %load0, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
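+; With a runtime stride, the loads are vectorized using
+; llvm.experimental.vp.strided.load.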
+define void @rt_stride_1_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_1_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[STRIDE0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]]
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
+; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %stride0 = mul nsw i64 %stride, 0
+ %stride1 = mul nsw i64 %stride, 1
+ %stride2 = mul nsw i64 %stride, 2
+ %stride3 = mul nsw i64 %stride, 3
+ %stride4 = mul nsw i64 %stride, 4
+ %stride5 = mul nsw i64 %stride, 5
+ %stride6 = mul nsw i64 %stride, 6
+ %stride7 = mul nsw i64 %stride, 7
+ %stride8 = mul nsw i64 %stride, 8
+ %stride9 = mul nsw i64 %stride, 9
+ %stride10 = mul nsw i64 %stride, 10
+ %stride11 = mul nsw i64 %stride, 11
+ %stride12 = mul nsw i64 %stride, 12
+ %stride13 = mul nsw i64 %stride, 13
+ %stride14 = mul nsw i64 %stride, 14
+ %stride15 = mul nsw i64 %stride, 15
+
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
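+; Same as above, except the first two elements are swapped; a shufflevector is
+; expected after the strided load.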
+define void @rt_stride_1_with_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_1_with_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[STRIDE0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[STRIDE0]]
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[STRIDE]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = call <16 x i8> @llvm.experimental.vp.strided.load.v16i8.p0.i64(ptr align 16 [[GEP_L0]], i64 [[TMP1]], <16 x i1> splat (i1 true), i32 16)
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <16 x i8> [[TMP2]], <16 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: store <16 x i8> [[TMP3]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %stride0 = mul nsw i64 %stride, 0
+ %stride1 = mul nsw i64 %stride, 1
+ %stride2 = mul nsw i64 %stride, 2
+ %stride3 = mul nsw i64 %stride, 3
+ %stride4 = mul nsw i64 %stride, 4
+ %stride5 = mul nsw i64 %stride, 5
+ %stride6 = mul nsw i64 %stride, 6
+ %stride7 = mul nsw i64 %stride, 7
+ %stride8 = mul nsw i64 %stride, 8
+ %stride9 = mul nsw i64 %stride, 9
+ %stride10 = mul nsw i64 %stride, 10
+ %stride11 = mul nsw i64 %stride, 11
+ %stride12 = mul nsw i64 %stride, 12
+ %stride13 = mul nsw i64 %stride, 13
+ %stride14 = mul nsw i64 %stride, 14
+ %stride15 = mul nsw i64 %stride, 15
+
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %stride0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %stride1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %stride2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %stride3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %stride4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %stride5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %stride6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %stride7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %stride8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %stride9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %stride10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %stride11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %stride12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %stride13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %stride14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %stride15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load1, ptr %gep_s0, align 16
+ store i8 %load0, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+; TODO: We want to generate this code:
+; define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 8, <4 x i1> splat (i1 true), i32 4)
+; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
+; store <16 x i8> %bitcast_, ptr %gep_s0, align 16
+; ret void
+; }
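+; For now, the loads are instead vectorized as a masked load of the 28 touched
+; bytes plus a shuffle (see the CHECK lines below).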
+define void @constant_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @constant_stride_widen_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 0
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = call <28 x i8> @llvm.masked.load.v28i8.p0(ptr [[GEP_L0]], i32 16, <28 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>, <28 x i8> poison)
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <28 x i8> [[TMP1]], <28 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19, i32 24, i32 25, i32 26, i32 27>
+; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 8
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 9
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 10
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 11
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 16
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 17
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 18
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 19
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 24
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 25
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 26
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 27
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
+
+; TODO: We want to generate this code:
+; define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
+; %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+; %strided_load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i64(ptr align 16 %gep_l0, i64 %stride, <4 x i1> splat (i1 true), i32 4)
+; %bitcast_ = bitcast <4 x i32> %strided_load to <16 x i8>
+; store <16 x i8> %bitcast_, ptr %gep_s0, align 16
+; ret void
+; }
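+; For now, the four groups are instead loaded as separate <4 x i8> vectors and
+; combined with shuffles (see the CHECK lines below).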
+define void @rt_stride_widen_no_reordering(ptr %pl, i64 %stride, ptr %ps) {
+; CHECK-LABEL: define void @rt_stride_widen_no_reordering(
+; CHECK-SAME: ptr [[PL:%.*]], i64 [[STRIDE:%.*]], ptr [[PS:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[OFFSET0:%.*]] = mul nsw i64 [[STRIDE]], 0
+; CHECK-NEXT: [[OFFSET4:%.*]] = mul nsw i64 [[STRIDE]], 1
+; CHECK-NEXT: [[OFFSET8:%.*]] = mul nsw i64 [[STRIDE]], 2
+; CHECK-NEXT: [[OFFSET12:%.*]] = mul nsw i64 [[STRIDE]], 3
+; CHECK-NEXT: [[GEP_L0:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET0]]
+; CHECK-NEXT: [[GEP_L4:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET4]]
+; CHECK-NEXT: [[GEP_L8:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET8]]
+; CHECK-NEXT: [[GEP_L12:%.*]] = getelementptr inbounds i8, ptr [[PL]], i64 [[OFFSET12]]
+; CHECK-NEXT: [[GEP_S0:%.*]] = getelementptr inbounds i8, ptr [[PS]], i64 0
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[GEP_L0]], align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[GEP_L4]], align 16
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[GEP_L8]], align 16
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[GEP_L12]], align 16
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP2]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP11:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <16 x i8> [[TMP7]], <16 x i8> [[TMP11]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <16 x i8> [[TMP9]], <16 x i8> [[TMP10]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT: store <16 x i8> [[TMP8]], ptr [[GEP_S0]], align 16
+; CHECK-NEXT: ret void
+;
+ %offset0 = mul nsw i64 %stride, 0
+ %offset1 = add nsw i64 %offset0, 1
+ %offset2 = add nsw i64 %offset0, 2
+ %offset3 = add nsw i64 %offset0, 3
+ %offset4 = mul nsw i64 %stride, 1
+ %offset5 = add nsw i64 %offset4, 1
+ %offset6 = add nsw i64 %offset4, 2
+ %offset7 = add nsw i64 %offset4, 3
+ %offset8 = mul nsw i64 %stride, 2
+ %offset9 = add nsw i64 %offset8, 1
+ %offset10 = add nsw i64 %offset8, 2
+ %offset11 = add nsw i64 %offset8, 3
+ %offset12 = mul nsw i64 %stride, 3
+ %offset13 = add nsw i64 %offset12, 1
+ %offset14 = add nsw i64 %offset12, 2
+ %offset15 = add nsw i64 %offset12, 3
+
+ %gep_l0 = getelementptr inbounds i8, ptr %pl, i64 %offset0
+ %gep_l1 = getelementptr inbounds i8, ptr %pl, i64 %offset1
+ %gep_l2 = getelementptr inbounds i8, ptr %pl, i64 %offset2
+ %gep_l3 = getelementptr inbounds i8, ptr %pl, i64 %offset3
+ %gep_l4 = getelementptr inbounds i8, ptr %pl, i64 %offset4
+ %gep_l5 = getelementptr inbounds i8, ptr %pl, i64 %offset5
+ %gep_l6 = getelementptr inbounds i8, ptr %pl, i64 %offset6
+ %gep_l7 = getelementptr inbounds i8, ptr %pl, i64 %offset7
+ %gep_l8 = getelementptr inbounds i8, ptr %pl, i64 %offset8
+ %gep_l9 = getelementptr inbounds i8, ptr %pl, i64 %offset9
+ %gep_l10 = getelementptr inbounds i8, ptr %pl, i64 %offset10
+ %gep_l11 = getelementptr inbounds i8, ptr %pl, i64 %offset11
+ %gep_l12 = getelementptr inbounds i8, ptr %pl, i64 %offset12
+ %gep_l13 = getelementptr inbounds i8, ptr %pl, i64 %offset13
+ %gep_l14 = getelementptr inbounds i8, ptr %pl, i64 %offset14
+ %gep_l15 = getelementptr inbounds i8, ptr %pl, i64 %offset15
+
+ %load0 = load i8, ptr %gep_l0 , align 16
+ %load1 = load i8, ptr %gep_l1 , align 16
+ %load2 = load i8, ptr %gep_l2 , align 16
+ %load3 = load i8, ptr %gep_l3 , align 16
+ %load4 = load i8, ptr %gep_l4 , align 16
+ %load5 = load i8, ptr %gep_l5 , align 16
+ %load6 = load i8, ptr %gep_l6 , align 16
+ %load7 = load i8, ptr %gep_l7 , align 16
+ %load8 = load i8, ptr %gep_l8 , align 16
+ %load9 = load i8, ptr %gep_l9 , align 16
+ %load10 = load i8, ptr %gep_l10, align 16
+ %load11 = load i8, ptr %gep_l11, align 16
+ %load12 = load i8, ptr %gep_l12, align 16
+ %load13 = load i8, ptr %gep_l13, align 16
+ %load14 = load i8, ptr %gep_l14, align 16
+ %load15 = load i8, ptr %gep_l15, align 16
+
+ %gep_s0 = getelementptr inbounds i8, ptr %ps, i64 0
+ %gep_s1 = getelementptr inbounds i8, ptr %ps, i64 1
+ %gep_s2 = getelementptr inbounds i8, ptr %ps, i64 2
+ %gep_s3 = getelementptr inbounds i8, ptr %ps, i64 3
+ %gep_s4 = getelementptr inbounds i8, ptr %ps, i64 4
+ %gep_s5 = getelementptr inbounds i8, ptr %ps, i64 5
+ %gep_s6 = getelementptr inbounds i8, ptr %ps, i64 6
+ %gep_s7 = getelementptr inbounds i8, ptr %ps, i64 7
+ %gep_s8 = getelementptr inbounds i8, ptr %ps, i64 8
+ %gep_s9 = getelementptr inbounds i8, ptr %ps, i64 9
+ %gep_s10 = getelementptr inbounds i8, ptr %ps, i64 10
+ %gep_s11 = getelementptr inbounds i8, ptr %ps, i64 11
+ %gep_s12 = getelementptr inbounds i8, ptr %ps, i64 12
+ %gep_s13 = getelementptr inbounds i8, ptr %ps, i64 13
+ %gep_s14 = getelementptr inbounds i8, ptr %ps, i64 14
+ %gep_s15 = getelementptr inbounds i8, ptr %ps, i64 15
+
+ store i8 %load0, ptr %gep_s0, align 16
+ store i8 %load1, ptr %gep_s1, align 16
+ store i8 %load2, ptr %gep_s2, align 16
+ store i8 %load3, ptr %gep_s3, align 16
+ store i8 %load4, ptr %gep_s4, align 16
+ store i8 %load5, ptr %gep_s5, align 16
+ store i8 %load6, ptr %gep_s6, align 16
+ store i8 %load7, ptr %gep_s7, align 16
+ store i8 %load8, ptr %gep_s8, align 16
+ store i8 %load9, ptr %gep_s9, align 16
+ store i8 %load10, ptr %gep_s10, align 16
+ store i8 %load11, ptr %gep_s11, align 16
+ store i8 %load12, ptr %gep_s12, align 16
+ store i8 %load13, ptr %gep_s13, align 16
+ store i8 %load14, ptr %gep_s14, align 16
+ store i8 %load15, ptr %gep_s15, align 16
+
+ ret void
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/vec3-base.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/vec3-base.ll
index 27de36e..430a46b 100644
--- a/llvm/test/Transforms/SLPVectorizer/RISCV/vec3-base.ll
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/vec3-base.ll
@@ -600,29 +600,25 @@ define i32 @dot_product_i32_reorder(ptr %a, ptr %b) {
}
define float @dot_product_fp32(ptr %a, ptr %b) {
-; NON-POW2-LABEL: @dot_product_fp32(
-; NON-POW2-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 0
-; NON-POW2-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 0
-; NON-POW2-NEXT: [[TMP1:%.*]] = load <3 x float>, ptr [[GEP_A_0]], align 4
-; NON-POW2-NEXT: [[TMP2:%.*]] = load <3 x float>, ptr [[GEP_B_0]], align 4
-; NON-POW2-NEXT: [[TMP3:%.*]] = fmul fast <3 x float> [[TMP1]], [[TMP2]]
-; NON-POW2-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v3f32(float 0.000000e+00, <3 x float> [[TMP3]])
-; NON-POW2-NEXT: ret float [[TMP4]]
-;
-; POW2-ONLY-LABEL: @dot_product_fp32(
-; POW2-ONLY-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 0
-; POW2-ONLY-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds float, ptr [[A]], i32 2
-; POW2-ONLY-NEXT: [[L_A_2:%.*]] = load float, ptr [[GEP_A_2]], align 4
-; POW2-ONLY-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 0
-; POW2-ONLY-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i32 2
-; POW2-ONLY-NEXT: [[L_B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
-; POW2-ONLY-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP_A_0]], align 4
-; POW2-ONLY-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[GEP_B_0]], align 4
-; POW2-ONLY-NEXT: [[TMP3:%.*]] = fmul fast <2 x float> [[TMP1]], [[TMP2]]
-; POW2-ONLY-NEXT: [[MUL_2:%.*]] = fmul fast float [[L_A_2]], [[L_B_2]]
-; POW2-ONLY-NEXT: [[ADD_0:%.*]] = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> [[TMP3]])
-; POW2-ONLY-NEXT: [[ADD_1:%.*]] = fadd fast float [[ADD_0]], [[MUL_2]]
-; POW2-ONLY-NEXT: ret float [[ADD_1]]
+; CHECK-LABEL: @dot_product_fp32(
+; CHECK-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 0
+; CHECK-NEXT: [[L_A_0:%.*]] = load float, ptr [[GEP_A_0]], align 4
+; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds float, ptr [[A]], i32 1
+; CHECK-NEXT: [[L_A_1:%.*]] = load float, ptr [[GEP_A_1]], align 4
+; CHECK-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds float, ptr [[A]], i32 2
+; CHECK-NEXT: [[L_A_2:%.*]] = load float, ptr [[GEP_A_2]], align 4
+; CHECK-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 0
+; CHECK-NEXT: [[L_B_0:%.*]] = load float, ptr [[GEP_B_0]], align 4
+; CHECK-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds float, ptr [[B]], i32 1
+; CHECK-NEXT: [[L_B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
+; CHECK-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i32 2
+; CHECK-NEXT: [[L_B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
+; CHECK-NEXT: [[MUL_0:%.*]] = fmul fast float [[L_A_0]], [[L_B_0]]
+; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[L_A_1]], [[L_B_1]]
+; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[L_A_2]], [[L_B_2]]
+; CHECK-NEXT: [[ADD_0:%.*]] = fadd fast float [[MUL_0]], [[MUL_1]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[ADD_0]], [[MUL_2]]
+; CHECK-NEXT: ret float [[ADD_1]]
;
%gep.a.0 = getelementptr inbounds float, ptr %a, i32 0
%l.a.0 = load float, ptr %gep.a.0, align 4
@@ -650,29 +646,25 @@ define float @dot_product_fp32(ptr %a, ptr %b) {
; Same as above, except the reduction order has been perturbed. This
; is checking for our ability to reorder.
define float @dot_product_fp32_reorder(ptr %a, ptr %b) {
-; NON-POW2-LABEL: @dot_product_fp32_reorder(
-; NON-POW2-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 0
-; NON-POW2-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 0
-; NON-POW2-NEXT: [[TMP1:%.*]] = load <3 x float>, ptr [[GEP_A_0]], align 4
-; NON-POW2-NEXT: [[TMP2:%.*]] = load <3 x float>, ptr [[GEP_B_0]], align 4
-; NON-POW2-NEXT: [[TMP3:%.*]] = fmul fast <3 x float> [[TMP1]], [[TMP2]]
-; NON-POW2-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v3f32(float 0.000000e+00, <3 x float> [[TMP3]])
-; NON-POW2-NEXT: ret float [[TMP4]]
-;
-; POW2-ONLY-LABEL: @dot_product_fp32_reorder(
-; POW2-ONLY-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 0
-; POW2-ONLY-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds float, ptr [[A]], i32 2
-; POW2-ONLY-NEXT: [[L_A_2:%.*]] = load float, ptr [[GEP_A_2]], align 4
-; POW2-ONLY-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 0
-; POW2-ONLY-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i32 2
-; POW2-ONLY-NEXT: [[L_B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
-; POW2-ONLY-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP_A_0]], align 4
-; POW2-ONLY-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[GEP_B_0]], align 4
-; POW2-ONLY-NEXT: [[TMP3:%.*]] = fmul fast <2 x float> [[TMP1]], [[TMP2]]
-; POW2-ONLY-NEXT: [[MUL_2:%.*]] = fmul fast float [[L_A_2]], [[L_B_2]]
-; POW2-ONLY-NEXT: [[ADD_0:%.*]] = call fast float @llvm.vector.reduce.fadd.v2f32(float 0.000000e+00, <2 x float> [[TMP3]])
-; POW2-ONLY-NEXT: [[ADD_1:%.*]] = fadd fast float [[ADD_0]], [[MUL_2]]
-; POW2-ONLY-NEXT: ret float [[ADD_1]]
+; CHECK-LABEL: @dot_product_fp32_reorder(
+; CHECK-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i32 0
+; CHECK-NEXT: [[L_A_0:%.*]] = load float, ptr [[GEP_A_0]], align 4
+; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds float, ptr [[A]], i32 1
+; CHECK-NEXT: [[L_A_1:%.*]] = load float, ptr [[GEP_A_1]], align 4
+; CHECK-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds float, ptr [[A]], i32 2
+; CHECK-NEXT: [[L_A_2:%.*]] = load float, ptr [[GEP_A_2]], align 4
+; CHECK-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i32 0
+; CHECK-NEXT: [[L_B_0:%.*]] = load float, ptr [[GEP_B_0]], align 4
+; CHECK-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds float, ptr [[B]], i32 1
+; CHECK-NEXT: [[L_B_1:%.*]] = load float, ptr [[GEP_B_1]], align 4
+; CHECK-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds float, ptr [[B]], i32 2
+; CHECK-NEXT: [[L_B_2:%.*]] = load float, ptr [[GEP_B_2]], align 4
+; CHECK-NEXT: [[MUL_0:%.*]] = fmul fast float [[L_A_0]], [[L_B_0]]
+; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[L_A_1]], [[L_B_1]]
+; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[L_A_2]], [[L_B_2]]
+; CHECK-NEXT: [[ADD_0:%.*]] = fadd fast float [[MUL_1]], [[MUL_0]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[ADD_0]], [[MUL_2]]
+; CHECK-NEXT: ret float [[ADD_1]]
;
%gep.a.0 = getelementptr inbounds float, ptr %a, i32 0
%l.a.0 = load float, ptr %gep.a.0, align 4
@@ -699,29 +691,25 @@ define float @dot_product_fp32_reorder(ptr %a, ptr %b) {
define double @dot_product_fp64(ptr %a, ptr %b) {
-; NON-POW2-LABEL: @dot_product_fp64(
-; NON-POW2-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i32 0
-; NON-POW2-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds double, ptr [[B:%.*]], i32 0
-; NON-POW2-NEXT: [[TMP1:%.*]] = load <3 x double>, ptr [[GEP_A_0]], align 4
-; NON-POW2-NEXT: [[TMP2:%.*]] = load <3 x double>, ptr [[GEP_B_0]], align 4
-; NON-POW2-NEXT: [[TMP3:%.*]] = fmul fast <3 x double> [[TMP1]], [[TMP2]]
-; NON-POW2-NEXT: [[TMP4:%.*]] = call fast double @llvm.vector.reduce.fadd.v3f64(double 0.000000e+00, <3 x double> [[TMP3]])
-; NON-POW2-NEXT: ret double [[TMP4]]
-;
-; POW2-ONLY-LABEL: @dot_product_fp64(
-; POW2-ONLY-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i32 0
-; POW2-ONLY-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds double, ptr [[A]], i32 2
-; POW2-ONLY-NEXT: [[L_A_2:%.*]] = load double, ptr [[GEP_A_2]], align 4
-; POW2-ONLY-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds double, ptr [[B:%.*]], i32 0
-; POW2-ONLY-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds double, ptr [[B]], i32 2
-; POW2-ONLY-NEXT: [[L_B_2:%.*]] = load double, ptr [[GEP_B_2]], align 4
-; POW2-ONLY-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr [[GEP_A_0]], align 4
-; POW2-ONLY-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[GEP_B_0]], align 4
-; POW2-ONLY-NEXT: [[TMP3:%.*]] = fmul fast <2 x double> [[TMP1]], [[TMP2]]
-; POW2-ONLY-NEXT: [[MUL_2:%.*]] = fmul fast double [[L_A_2]], [[L_B_2]]
-; POW2-ONLY-NEXT: [[ADD_0:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> [[TMP3]])
-; POW2-ONLY-NEXT: [[ADD_1:%.*]] = fadd fast double [[ADD_0]], [[MUL_2]]
-; POW2-ONLY-NEXT: ret double [[ADD_1]]
+; CHECK-LABEL: @dot_product_fp64(
+; CHECK-NEXT: [[GEP_A_0:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i32 0
+; CHECK-NEXT: [[L_A_0:%.*]] = load double, ptr [[GEP_A_0]], align 4
+; CHECK-NEXT: [[GEP_A_1:%.*]] = getelementptr inbounds double, ptr [[A]], i32 1
+; CHECK-NEXT: [[L_A_1:%.*]] = load double, ptr [[GEP_A_1]], align 4
+; CHECK-NEXT: [[GEP_A_2:%.*]] = getelementptr inbounds double, ptr [[A]], i32 2
+; CHECK-NEXT: [[L_A_2:%.*]] = load double, ptr [[GEP_A_2]], align 4
+; CHECK-NEXT: [[GEP_B_0:%.*]] = getelementptr inbounds double, ptr [[B:%.*]], i32 0
+; CHECK-NEXT: [[L_B_0:%.*]] = load double, ptr [[GEP_B_0]], align 4
+; CHECK-NEXT: [[GEP_B_1:%.*]] = getelementptr inbounds double, ptr [[B]], i32 1
+; CHECK-NEXT: [[L_B_1:%.*]] = load double, ptr [[GEP_B_1]], align 4
+; CHECK-NEXT: [[GEP_B_2:%.*]] = getelementptr inbounds double, ptr [[B]], i32 2
+; CHECK-NEXT: [[L_B_2:%.*]] = load double, ptr [[GEP_B_2]], align 4
+; CHECK-NEXT: [[MUL_0:%.*]] = fmul fast double [[L_A_0]], [[L_B_0]]
+; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast double [[L_A_1]], [[L_B_1]]
+; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast double [[L_A_2]], [[L_B_2]]
+; CHECK-NEXT: [[ADD_0:%.*]] = fadd fast double [[MUL_0]], [[MUL_1]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast double [[ADD_0]], [[MUL_2]]
+; CHECK-NEXT: ret double [[ADD_1]]
;
%gep.a.0 = getelementptr inbounds double, ptr %a, i32 0
%l.a.0 = load double, ptr %gep.a.0, align 4
@@ -778,21 +766,13 @@ entry:
}
define float @reduce_fadd_after_fmul_of_buildvec(float %a, float %b, float %c) {
-; NON-POW2-LABEL: @reduce_fadd_after_fmul_of_buildvec(
-; NON-POW2-NEXT: [[TMP1:%.*]] = insertelement <3 x float> poison, float [[A:%.*]], i32 0
-; NON-POW2-NEXT: [[TMP2:%.*]] = insertelement <3 x float> [[TMP1]], float [[B:%.*]], i32 1
-; NON-POW2-NEXT: [[TMP3:%.*]] = insertelement <3 x float> [[TMP2]], float [[C:%.*]], i32 2
-; NON-POW2-NEXT: [[TMP4:%.*]] = fmul fast <3 x float> [[TMP3]], splat (float 1.000000e+01)
-; NON-POW2-NEXT: [[TMP5:%.*]] = call fast float @llvm.vector.reduce.fadd.v3f32(float 0.000000e+00, <3 x float> [[TMP4]])
-; NON-POW2-NEXT: ret float [[TMP5]]
-;
-; POW2-ONLY-LABEL: @reduce_fadd_after_fmul_of_buildvec(
-; POW2-ONLY-NEXT: [[MUL_0:%.*]] = fmul fast float [[A:%.*]], 1.000000e+01
-; POW2-ONLY-NEXT: [[MUL_1:%.*]] = fmul fast float [[B:%.*]], 1.000000e+01
-; POW2-ONLY-NEXT: [[MUL_2:%.*]] = fmul fast float [[C:%.*]], 1.000000e+01
-; POW2-ONLY-NEXT: [[ADD_0:%.*]] = fadd fast float [[MUL_0]], [[MUL_1]]
-; POW2-ONLY-NEXT: [[ADD_1:%.*]] = fadd fast float [[ADD_0]], [[MUL_2]]
-; POW2-ONLY-NEXT: ret float [[ADD_1]]
+; CHECK-LABEL: @reduce_fadd_after_fmul_of_buildvec(
+; CHECK-NEXT: [[MUL_0:%.*]] = fmul fast float [[A:%.*]], 1.000000e+01
+; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[B:%.*]], 1.000000e+01
+; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[C:%.*]], 1.000000e+01
+; CHECK-NEXT: [[ADD_0:%.*]] = fadd fast float [[MUL_0]], [[MUL_1]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[ADD_0]], [[MUL_2]]
+; CHECK-NEXT: ret float [[ADD_1]]
;
%mul.0 = fmul fast float %a, 10.0
%mul.1 = fmul fast float %b, 10.0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/dot-product.ll b/llvm/test/Transforms/SLPVectorizer/X86/dot-product.ll
index 4a8af6d..0879ec2 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/dot-product.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/dot-product.ll
@@ -2,7 +2,7 @@
; RUN: opt < %s -mtriple=x86_64-unknown -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE4
; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
-; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX2
;
; dot4(ptr x, ptr y) - ((x[0]*y[0])+(x[1]*y[1])+(x[2]*y[2])+(x[3]*y[3]))
@@ -95,12 +95,47 @@ define float @dot4f32(ptr dereferenceable(16) %ptrx, ptr dereferenceable(16) %pt
}
define double @dot4f64_fast(ptr dereferenceable(32) %ptrx, ptr dereferenceable(32) %ptry) {
-; CHECK-LABEL: @dot4f64_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr [[PTRX:%.*]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x double>, ptr [[PTRY:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fmul <4 x double> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP3]])
-; CHECK-NEXT: ret double [[TMP4]]
+; SSE2-LABEL: @dot4f64_fast(
+; SSE2-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr [[PTRX:%.*]], align 4
+; SSE2-NEXT: [[TMP2:%.*]] = load <4 x double>, ptr [[PTRY:%.*]], align 4
+; SSE2-NEXT: [[TMP3:%.*]] = fmul <4 x double> [[TMP1]], [[TMP2]]
+; SSE2-NEXT: [[TMP4:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP3]])
+; SSE2-NEXT: ret double [[TMP4]]
+;
+; SSE4-LABEL: @dot4f64_fast(
+; SSE4-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr [[PTRX:%.*]], align 4
+; SSE4-NEXT: [[TMP2:%.*]] = load <4 x double>, ptr [[PTRY:%.*]], align 4
+; SSE4-NEXT: [[TMP3:%.*]] = fmul <4 x double> [[TMP1]], [[TMP2]]
+; SSE4-NEXT: [[TMP4:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP3]])
+; SSE4-NEXT: ret double [[TMP4]]
+;
+; AVX-LABEL: @dot4f64_fast(
+; AVX-NEXT: [[TMP1:%.*]] = load <4 x double>, ptr [[PTRX:%.*]], align 4
+; AVX-NEXT: [[TMP2:%.*]] = load <4 x double>, ptr [[PTRY:%.*]], align 4
+; AVX-NEXT: [[TMP3:%.*]] = fmul <4 x double> [[TMP1]], [[TMP2]]
+; AVX-NEXT: [[TMP4:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP3]])
+; AVX-NEXT: ret double [[TMP4]]
+;
+; AVX2-LABEL: @dot4f64_fast(
+; AVX2-NEXT: [[PTRX1:%.*]] = getelementptr inbounds double, ptr [[PTRX:%.*]], i64 1
+; AVX2-NEXT: [[PTRY1:%.*]] = getelementptr inbounds double, ptr [[PTRY:%.*]], i64 1
+; AVX2-NEXT: [[PTRX2:%.*]] = getelementptr inbounds double, ptr [[PTRX]], i64 2
+; AVX2-NEXT: [[PTRY2:%.*]] = getelementptr inbounds double, ptr [[PTRY]], i64 2
+; AVX2-NEXT: [[X0:%.*]] = load double, ptr [[PTRX]], align 4
+; AVX2-NEXT: [[Y0:%.*]] = load double, ptr [[PTRY]], align 4
+; AVX2-NEXT: [[X1:%.*]] = load double, ptr [[PTRX1]], align 4
+; AVX2-NEXT: [[Y1:%.*]] = load double, ptr [[PTRY1]], align 4
+; AVX2-NEXT: [[MUL0:%.*]] = fmul double [[X0]], [[Y0]]
+; AVX2-NEXT: [[MUL1:%.*]] = fmul double [[X1]], [[Y1]]
+; AVX2-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr [[PTRX2]], align 4
+; AVX2-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[PTRY2]], align 4
+; AVX2-NEXT: [[TMP3:%.*]] = fmul <2 x double> [[TMP1]], [[TMP2]]
+; AVX2-NEXT: [[DOT01:%.*]] = fadd fast double [[MUL0]], [[MUL1]]
+; AVX2-NEXT: [[TMP4:%.*]] = extractelement <2 x double> [[TMP3]], i32 0
+; AVX2-NEXT: [[DOT012:%.*]] = fadd fast double [[DOT01]], [[TMP4]]
+; AVX2-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[TMP3]], i32 1
+; AVX2-NEXT: [[DOT0123:%.*]] = fadd fast double [[DOT012]], [[TMP5]]
+; AVX2-NEXT: ret double [[DOT0123]]
;
%ptrx1 = getelementptr inbounds double, ptr %ptrx, i64 1
%ptry1 = getelementptr inbounds double, ptr %ptry, i64 1
@@ -127,12 +162,47 @@ define double @dot4f64_fast(ptr dereferenceable(32) %ptrx, ptr dereferenceable(3
}
define float @dot4f32_fast(ptr dereferenceable(16) %ptrx, ptr dereferenceable(16) %ptry) {
-; CHECK-LABEL: @dot4f32_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[PTRX:%.*]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[PTRY:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fmul <4 x float> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
-; CHECK-NEXT: ret float [[TMP4]]
+; SSE2-LABEL: @dot4f32_fast(
+; SSE2-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[PTRX:%.*]], align 4
+; SSE2-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[PTRY:%.*]], align 4
+; SSE2-NEXT: [[TMP3:%.*]] = fmul <4 x float> [[TMP1]], [[TMP2]]
+; SSE2-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
+; SSE2-NEXT: ret float [[TMP4]]
+;
+; SSE4-LABEL: @dot4f32_fast(
+; SSE4-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[PTRX:%.*]], align 4
+; SSE4-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[PTRY:%.*]], align 4
+; SSE4-NEXT: [[TMP3:%.*]] = fmul <4 x float> [[TMP1]], [[TMP2]]
+; SSE4-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
+; SSE4-NEXT: ret float [[TMP4]]
+;
+; AVX-LABEL: @dot4f32_fast(
+; AVX-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[PTRX:%.*]], align 4
+; AVX-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[PTRY:%.*]], align 4
+; AVX-NEXT: [[TMP3:%.*]] = fmul <4 x float> [[TMP1]], [[TMP2]]
+; AVX-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
+; AVX-NEXT: ret float [[TMP4]]
+;
+; AVX2-LABEL: @dot4f32_fast(
+; AVX2-NEXT: [[PTRX1:%.*]] = getelementptr inbounds float, ptr [[PTRX:%.*]], i64 1
+; AVX2-NEXT: [[PTRY1:%.*]] = getelementptr inbounds float, ptr [[PTRY:%.*]], i64 1
+; AVX2-NEXT: [[PTRX2:%.*]] = getelementptr inbounds float, ptr [[PTRX]], i64 2
+; AVX2-NEXT: [[PTRY2:%.*]] = getelementptr inbounds float, ptr [[PTRY]], i64 2
+; AVX2-NEXT: [[X0:%.*]] = load float, ptr [[PTRX]], align 4
+; AVX2-NEXT: [[Y0:%.*]] = load float, ptr [[PTRY]], align 4
+; AVX2-NEXT: [[X1:%.*]] = load float, ptr [[PTRX1]], align 4
+; AVX2-NEXT: [[Y1:%.*]] = load float, ptr [[PTRY1]], align 4
+; AVX2-NEXT: [[MUL0:%.*]] = fmul float [[X0]], [[Y0]]
+; AVX2-NEXT: [[MUL1:%.*]] = fmul float [[X1]], [[Y1]]
+; AVX2-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[PTRX2]], align 4
+; AVX2-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[PTRY2]], align 4
+; AVX2-NEXT: [[TMP3:%.*]] = fmul <2 x float> [[TMP1]], [[TMP2]]
+; AVX2-NEXT: [[DOT01:%.*]] = fadd fast float [[MUL0]], [[MUL1]]
+; AVX2-NEXT: [[TMP4:%.*]] = extractelement <2 x float> [[TMP3]], i32 0
+; AVX2-NEXT: [[DOT012:%.*]] = fadd fast float [[DOT01]], [[TMP4]]
+; AVX2-NEXT: [[TMP5:%.*]] = extractelement <2 x float> [[TMP3]], i32 1
+; AVX2-NEXT: [[DOT0123:%.*]] = fadd fast float [[DOT012]], [[TMP5]]
+; AVX2-NEXT: ret float [[DOT0123]]
;
%ptrx1 = getelementptr inbounds float, ptr %ptrx, i64 1
%ptry1 = getelementptr inbounds float, ptr %ptry, i64 1
@@ -372,6 +442,18 @@ define double @dot2f64_fast(ptr dereferenceable(16) %ptrx, ptr dereferenceable(1
; AVX-NEXT: [[DOT01:%.*]] = fadd fast double [[TMP4]], [[TMP5]]
; AVX-NEXT: ret double [[DOT01]]
;
+; AVX2-LABEL: @dot2f64_fast(
+; AVX2-NEXT: [[PTRX1:%.*]] = getelementptr inbounds double, ptr [[PTRX:%.*]], i64 1
+; AVX2-NEXT: [[PTRY1:%.*]] = getelementptr inbounds double, ptr [[PTRY:%.*]], i64 1
+; AVX2-NEXT: [[X0:%.*]] = load double, ptr [[PTRX]], align 4
+; AVX2-NEXT: [[Y0:%.*]] = load double, ptr [[PTRY]], align 4
+; AVX2-NEXT: [[X1:%.*]] = load double, ptr [[PTRX1]], align 4
+; AVX2-NEXT: [[Y1:%.*]] = load double, ptr [[PTRY1]], align 4
+; AVX2-NEXT: [[MUL0:%.*]] = fmul double [[X0]], [[Y0]]
+; AVX2-NEXT: [[MUL1:%.*]] = fmul double [[X1]], [[Y1]]
+; AVX2-NEXT: [[DOT01:%.*]] = fadd fast double [[MUL0]], [[MUL1]]
+; AVX2-NEXT: ret double [[DOT01]]
+;
%ptrx1 = getelementptr inbounds double, ptr %ptrx, i64 1
%ptry1 = getelementptr inbounds double, ptr %ptry, i64 1
%x0 = load double, ptr %ptrx, align 4
@@ -410,6 +492,18 @@ define float @dot2f32_fast(ptr dereferenceable(16) %ptrx, ptr dereferenceable(16
; AVX-NEXT: [[DOT01:%.*]] = fadd fast float [[TMP4]], [[TMP5]]
; AVX-NEXT: ret float [[DOT01]]
;
+; AVX2-LABEL: @dot2f32_fast(
+; AVX2-NEXT: [[PTRX1:%.*]] = getelementptr inbounds float, ptr [[PTRX:%.*]], i64 1
+; AVX2-NEXT: [[PTRY1:%.*]] = getelementptr inbounds float, ptr [[PTRY:%.*]], i64 1
+; AVX2-NEXT: [[X0:%.*]] = load float, ptr [[PTRX]], align 4
+; AVX2-NEXT: [[Y0:%.*]] = load float, ptr [[PTRY]], align 4
+; AVX2-NEXT: [[X1:%.*]] = load float, ptr [[PTRX1]], align 4
+; AVX2-NEXT: [[Y1:%.*]] = load float, ptr [[PTRY1]], align 4
+; AVX2-NEXT: [[MUL0:%.*]] = fmul float [[X0]], [[Y0]]
+; AVX2-NEXT: [[MUL1:%.*]] = fmul float [[X1]], [[Y1]]
+; AVX2-NEXT: [[DOT01:%.*]] = fadd fast float [[MUL0]], [[MUL1]]
+; AVX2-NEXT: ret float [[DOT01]]
+;
%ptrx1 = getelementptr inbounds float, ptr %ptrx, i64 1
%ptry1 = getelementptr inbounds float, ptr %ptry, i64 1
%x0 = load float, ptr %ptrx, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
index eaa77d7..0bbdeb55 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
@@ -31,12 +31,9 @@ define float @baz() {
; THRESHOLD-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @arr, align 16
; THRESHOLD-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr @arr1, align 16
; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fmul fast float [[CONV]], 2.000000e+00
; THRESHOLD-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
-; THRESHOLD-NEXT: [[TMP5:%.*]] = insertelement <2 x float> poison, float [[CONV]], i32 0
-; THRESHOLD-NEXT: [[TMP6:%.*]] = insertelement <2 x float> [[TMP5]], float [[TMP4]], i32 1
-; THRESHOLD-NEXT: [[TMP7:%.*]] = fmul fast <2 x float> [[TMP6]], splat (float 2.000000e+00)
-; THRESHOLD-NEXT: [[TMP8:%.*]] = extractelement <2 x float> [[TMP7]], i32 0
-; THRESHOLD-NEXT: [[TMP9:%.*]] = extractelement <2 x float> [[TMP7]], i32 1
+; THRESHOLD-NEXT: [[TMP9:%.*]] = fmul fast float [[TMP4]], 2.000000e+00
; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP8]], [[TMP9]]
; THRESHOLD-NEXT: store float [[OP_RDX]], ptr @res, align 4
; THRESHOLD-NEXT: ret float [[OP_RDX]]
@@ -76,14 +73,41 @@ define float @bazz() {
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @n, align 4
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @arr, align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr @arr1, align 16
+; CHECK-NEXT: [[MUL4:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL4]], [[CONV]]
+; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 1), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 1), align 4
+; CHECK-NEXT: [[MUL4_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
+; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[MUL4_1]], [[ADD]]
+; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 2), align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 2), align 8
+; CHECK-NEXT: [[MUL4_2:%.*]] = fmul fast float [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[MUL4_2]], [[ADD_1]]
+; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 3), align 4
+; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 3), align 4
+; CHECK-NEXT: [[MUL4_3:%.*]] = fmul fast float [[TMP8]], [[TMP7]]
+; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[MUL4_3]], [[ADD_2]]
; CHECK-NEXT: [[MUL5:%.*]] = shl nsw i32 [[TMP0]], 2
; CHECK-NEXT: [[CONV6:%.*]] = sitofp i32 [[MUL5]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @arr, align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x float>, ptr @arr1, align 16
-; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <8 x float> [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> [[TMP3]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP4]], [[CONV]]
-; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[CONV6]]
+; CHECK-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV6]]
+; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 4), align 16
+; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 4), align 16
+; CHECK-NEXT: [[MUL18:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
+; CHECK-NEXT: [[ADD19:%.*]] = fadd fast float [[MUL18]], [[ADD7]]
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 5), align 4
+; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 5), align 4
+; CHECK-NEXT: [[MUL18_1:%.*]] = fmul fast float [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[ADD19_1:%.*]] = fadd fast float [[MUL18_1]], [[ADD19]]
+; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 6), align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 6), align 8
+; CHECK-NEXT: [[MUL18_2:%.*]] = fmul fast float [[TMP14]], [[TMP13]]
+; CHECK-NEXT: [[ADD19_2:%.*]] = fadd fast float [[MUL18_2]], [[ADD19_1]]
+; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 7), align 4
+; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 7), align 4
+; CHECK-NEXT: [[MUL18_3:%.*]] = fmul fast float [[TMP16]], [[TMP15]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[MUL18_3]], [[ADD19_2]]
; CHECK-NEXT: store float [[OP_RDX1]], ptr @res, align 4
; CHECK-NEXT: ret float [[OP_RDX1]]
;
@@ -92,14 +116,41 @@ define float @bazz() {
; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, ptr @n, align 4
; THRESHOLD-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 3
; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[MUL]] to float
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load float, ptr @arr, align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load float, ptr @arr1, align 16
+; THRESHOLD-NEXT: [[MUL4:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[ADD:%.*]] = fadd fast float [[MUL4]], [[CONV]]
+; THRESHOLD-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 1), align 4
+; THRESHOLD-NEXT: [[TMP4:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 1), align 4
+; THRESHOLD-NEXT: [[MUL4_1:%.*]] = fmul fast float [[TMP4]], [[TMP3]]
+; THRESHOLD-NEXT: [[ADD_1:%.*]] = fadd fast float [[MUL4_1]], [[ADD]]
+; THRESHOLD-NEXT: [[TMP5:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[TMP6:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[MUL4_2:%.*]] = fmul fast float [[TMP6]], [[TMP5]]
+; THRESHOLD-NEXT: [[ADD_2:%.*]] = fadd fast float [[MUL4_2]], [[ADD_1]]
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[TMP8:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[MUL4_3:%.*]] = fmul fast float [[TMP8]], [[TMP7]]
+; THRESHOLD-NEXT: [[ADD_3:%.*]] = fadd fast float [[MUL4_3]], [[ADD_2]]
; THRESHOLD-NEXT: [[MUL5:%.*]] = shl nsw i32 [[TMP0]], 2
; THRESHOLD-NEXT: [[CONV6:%.*]] = sitofp i32 [[MUL5]] to float
-; THRESHOLD-NEXT: [[TMP1:%.*]] = load <8 x float>, ptr @arr, align 16
-; THRESHOLD-NEXT: [[TMP2:%.*]] = load <8 x float>, ptr @arr1, align 16
-; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <8 x float> [[TMP2]], [[TMP1]]
-; THRESHOLD-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v8f32(float 0.000000e+00, <8 x float> [[TMP3]])
-; THRESHOLD-NEXT: [[OP_RDX:%.*]] = fadd fast float [[TMP4]], [[CONV]]
-; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[OP_RDX]], [[CONV6]]
+; THRESHOLD-NEXT: [[ADD7:%.*]] = fadd fast float [[ADD_3]], [[CONV6]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 4), align 16
+; THRESHOLD-NEXT: [[TMP10:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 4), align 16
+; THRESHOLD-NEXT: [[MUL18:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
+; THRESHOLD-NEXT: [[ADD19:%.*]] = fadd fast float [[MUL18]], [[ADD7]]
+; THRESHOLD-NEXT: [[TMP11:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 5), align 4
+; THRESHOLD-NEXT: [[TMP12:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 5), align 4
+; THRESHOLD-NEXT: [[MUL18_1:%.*]] = fmul fast float [[TMP12]], [[TMP11]]
+; THRESHOLD-NEXT: [[ADD19_1:%.*]] = fadd fast float [[MUL18_1]], [[ADD19]]
+; THRESHOLD-NEXT: [[TMP13:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 6), align 8
+; THRESHOLD-NEXT: [[TMP14:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 6), align 8
+; THRESHOLD-NEXT: [[MUL18_2:%.*]] = fmul fast float [[TMP14]], [[TMP13]]
+; THRESHOLD-NEXT: [[ADD19_2:%.*]] = fadd fast float [[MUL18_2]], [[ADD19_1]]
+; THRESHOLD-NEXT: [[TMP15:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 7), align 4
+; THRESHOLD-NEXT: [[TMP16:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 7), align 4
+; THRESHOLD-NEXT: [[MUL18_3:%.*]] = fmul fast float [[TMP16]], [[TMP15]]
+; THRESHOLD-NEXT: [[OP_RDX1:%.*]] = fadd fast float [[MUL18_3]], [[ADD19_2]]
; THRESHOLD-NEXT: store float [[OP_RDX1]], ptr @res, align 4
; THRESHOLD-NEXT: ret float [[OP_RDX1]]
;
@@ -151,10 +202,21 @@ define float @bazzz() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @n, align 4
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @arr, align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr @arr1, align 16
-; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @arr, align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr @arr1, align 16
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 1), align 4
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 1), align 4
+; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP11]], [[TMP3]]
+; CHECK-NEXT: [[TMP12:%.*]] = fadd fast float [[MUL_1]], [[MUL]]
+; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 2), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 2), align 8
+; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = fadd fast float [[MUL_2]], [[TMP12]]
+; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 3), align 4
+; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 3), align 4
+; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd fast float [[MUL_3]], [[TMP8]]
; CHECK-NEXT: [[TMP5:%.*]] = fmul fast float [[CONV]], [[TMP4]]
; CHECK-NEXT: store float [[TMP5]], ptr @res, align 4
; CHECK-NEXT: ret float [[TMP5]]
@@ -163,10 +225,21 @@ define float @bazzz() {
; THRESHOLD-NEXT: entry:
; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, ptr @n, align 4
; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; THRESHOLD-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @arr, align 16
-; THRESHOLD-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr @arr1, align 16
-; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
-; THRESHOLD-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load float, ptr @arr, align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load float, ptr @arr1, align 16
+; THRESHOLD-NEXT: [[MUL:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 1), align 4
+; THRESHOLD-NEXT: [[TMP11:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 1), align 4
+; THRESHOLD-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP11]], [[TMP3]]
+; THRESHOLD-NEXT: [[TMP12:%.*]] = fadd fast float [[MUL_1]], [[MUL]]
+; THRESHOLD-NEXT: [[TMP6:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP7]], [[TMP6]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fadd fast float [[MUL_2]], [[TMP12]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[TMP10:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = fadd fast float [[MUL_3]], [[TMP8]]
; THRESHOLD-NEXT: [[TMP5:%.*]] = fmul fast float [[CONV]], [[TMP4]]
; THRESHOLD-NEXT: store float [[TMP5]], ptr @res, align 4
; THRESHOLD-NEXT: ret float [[TMP5]]
@@ -199,10 +272,21 @@ define i32 @foo() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @n, align 4
; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @arr, align 16
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr @arr1, align 16
-; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
-; CHECK-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr @arr, align 16
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr @arr1, align 16
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 1), align 4
+; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 1), align 4
+; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP11]], [[TMP3]]
+; CHECK-NEXT: [[TMP12:%.*]] = fadd fast float [[MUL_1]], [[MUL]]
+; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 2), align 8
+; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 2), align 8
+; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = fadd fast float [[MUL_2]], [[TMP12]]
+; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 3), align 4
+; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 3), align 4
+; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd fast float [[MUL_3]], [[TMP8]]
; CHECK-NEXT: [[TMP5:%.*]] = fmul fast float [[CONV]], [[TMP4]]
; CHECK-NEXT: [[CONV4:%.*]] = fptosi float [[TMP5]] to i32
; CHECK-NEXT: store i32 [[CONV4]], ptr @n, align 4
@@ -212,10 +296,21 @@ define i32 @foo() {
; THRESHOLD-NEXT: entry:
; THRESHOLD-NEXT: [[TMP0:%.*]] = load i32, ptr @n, align 4
; THRESHOLD-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; THRESHOLD-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr @arr, align 16
-; THRESHOLD-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr @arr1, align 16
-; THRESHOLD-NEXT: [[TMP3:%.*]] = fmul fast <4 x float> [[TMP2]], [[TMP1]]
-; THRESHOLD-NEXT: [[TMP4:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float 0.000000e+00, <4 x float> [[TMP3]])
+; THRESHOLD-NEXT: [[TMP1:%.*]] = load float, ptr @arr, align 16
+; THRESHOLD-NEXT: [[TMP2:%.*]] = load float, ptr @arr1, align 16
+; THRESHOLD-NEXT: [[MUL:%.*]] = fmul fast float [[TMP2]], [[TMP1]]
+; THRESHOLD-NEXT: [[TMP3:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 1), align 4
+; THRESHOLD-NEXT: [[TMP11:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 1), align 4
+; THRESHOLD-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP11]], [[TMP3]]
+; THRESHOLD-NEXT: [[TMP12:%.*]] = fadd fast float [[MUL_1]], [[MUL]]
+; THRESHOLD-NEXT: [[TMP6:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[TMP7:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 2), align 8
+; THRESHOLD-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP7]], [[TMP6]]
+; THRESHOLD-NEXT: [[TMP8:%.*]] = fadd fast float [[MUL_2]], [[TMP12]]
+; THRESHOLD-NEXT: [[TMP9:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[TMP10:%.*]] = load float, ptr getelementptr inbounds ([20 x float], ptr @arr1, i64 0, i64 3), align 4
+; THRESHOLD-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP10]], [[TMP9]]
+; THRESHOLD-NEXT: [[TMP4:%.*]] = fadd fast float [[MUL_3]], [[TMP8]]
; THRESHOLD-NEXT: [[TMP5:%.*]] = fmul fast float [[CONV]], [[TMP4]]
; THRESHOLD-NEXT: [[CONV4:%.*]] = fptosi float [[TMP5]] to i32
; THRESHOLD-NEXT: store i32 [[CONV4]], ptr @n, align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
index 9fbe0a5..ea637bb 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr35497.ll
@@ -8,119 +8,134 @@
%"class.3" = type { %"struct.1", i64 }
%"struct.1" = type { [8 x i64] }
-$_ZN1C10SwitchModeEv = comdat any
-
; Function Attrs: uwtable
-define void @_ZN1C10SwitchModeEv() local_unnamed_addr #0 comdat align 2 {
+define void @_ZN1C10SwitchModeEv(ptr %p, i64 %c) {
; SSE-LABEL: @_ZN1C10SwitchModeEv(
; SSE-NEXT: for.body.lr.ph.i:
-; SSE-NEXT: [[OR_1:%.*]] = or i64 undef, 1
-; SSE-NEXT: store i64 [[OR_1]], ptr undef, align 8
-; SSE-NEXT: [[FOO_3:%.*]] = load i64, ptr undef, align 8
-; SSE-NEXT: [[FOO_2:%.*]] = getelementptr inbounds [[CLASS_1:%.*]], ptr undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
+; SSE-NEXT: [[BAR5:%.*]] = or i64 [[C:%.*]], 1
+; SSE-NEXT: store i64 [[BAR5]], ptr [[FOO_2:%.*]], align 8
; SSE-NEXT: [[FOO_4:%.*]] = load i64, ptr [[FOO_2]], align 8
-; SSE-NEXT: [[BAR5:%.*]] = load i64, ptr undef, align 8
-; SSE-NEXT: [[AND_2:%.*]] = and i64 [[OR_1]], [[FOO_3]]
+; SSE-NEXT: [[FOO_3:%.*]] = getelementptr inbounds [[CLASS_1:%.*]], ptr [[FOO_2]], i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
+; SSE-NEXT: [[FOO_5:%.*]] = load i64, ptr [[FOO_3]], align 8
+; SSE-NEXT: [[BAR6:%.*]] = load i64, ptr [[FOO_2]], align 8
; SSE-NEXT: [[AND_1:%.*]] = and i64 [[BAR5]], [[FOO_4]]
-; SSE-NEXT: store i64 [[AND_2]], ptr undef, align 8
-; SSE-NEXT: [[BAR4:%.*]] = getelementptr inbounds [[CLASS_2:%.*]], ptr undef, i64 0, i32 0, i32 0, i32 0, i64 1
-; SSE-NEXT: store i64 [[AND_1]], ptr [[BAR4]], align 8
+; SSE-NEXT: [[AND_2:%.*]] = and i64 [[BAR6]], [[FOO_5]]
+; SSE-NEXT: store i64 [[AND_1]], ptr [[FOO_2]], align 8
+; SSE-NEXT: [[BAR4:%.*]] = getelementptr inbounds [[CLASS_2:%.*]], ptr [[FOO_2]], i64 0, i32 0, i32 0, i32 0, i64 1
+; SSE-NEXT: store i64 [[AND_2]], ptr [[BAR4]], align 8
; SSE-NEXT: ret void
;
; AVX-LABEL: @_ZN1C10SwitchModeEv(
; AVX-NEXT: for.body.lr.ph.i:
-; AVX-NEXT: [[OR_1:%.*]] = or i64 undef, 1
-; AVX-NEXT: store i64 [[OR_1]], ptr undef, align 8
-; AVX-NEXT: [[BAR5:%.*]] = load i64, ptr undef, align 8
-; AVX-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr undef, align 8
+; AVX-NEXT: [[OR_1:%.*]] = or i64 [[C:%.*]], 1
+; AVX-NEXT: store i64 [[OR_1]], ptr [[P:%.*]], align 8
+; AVX-NEXT: [[BAR5:%.*]] = load i64, ptr [[P]], align 8
+; AVX-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr [[P]], align 8
; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> poison, i64 [[OR_1]], i32 0
; AVX-NEXT: [[TMP2:%.*]] = insertelement <2 x i64> [[TMP1]], i64 [[BAR5]], i32 1
; AVX-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], [[TMP0]]
-; AVX-NEXT: store <2 x i64> [[TMP3]], ptr undef, align 8
+; AVX-NEXT: store <2 x i64> [[TMP3]], ptr [[P]], align 8
; AVX-NEXT: ret void
;
for.body.lr.ph.i:
- %or.1 = or i64 undef, 1
- store i64 %or.1, ptr undef, align 8
- %foo.3 = load i64, ptr undef, align 8
- %foo.2 = getelementptr inbounds %class.1, ptr undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
+ %or.1 = or i64 %c, 1
+ store i64 %or.1, ptr %p, align 8
+ %foo.3 = load i64, ptr %p, align 8
+ %foo.2 = getelementptr inbounds %class.1, ptr %p, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
%foo.4 = load i64, ptr %foo.2, align 8
- %bar5 = load i64, ptr undef, align 8
+ %bar5 = load i64, ptr %p, align 8
%and.2 = and i64 %or.1, %foo.3
%and.1 = and i64 %bar5, %foo.4
- store i64 %and.2, ptr undef, align 8
- %bar4 = getelementptr inbounds %class.2, ptr undef, i64 0, i32 0, i32 0, i32 0, i64 1
+ store i64 %and.2, ptr %p, align 8
+ %bar4 = getelementptr inbounds %class.2, ptr %p, i64 0, i32 0, i32 0, i32 0, i64 1
store i64 %and.1, ptr %bar4, align 8
ret void
}
; Function Attrs: norecurse nounwind uwtable
-define void @pr35497() local_unnamed_addr #0 {
+define void @pr35497(ptr %p, i64 %c) {
; SSE-LABEL: @pr35497(
; SSE-NEXT: entry:
-; SSE-NEXT: [[TMP0:%.*]] = load i64, ptr undef, align 1
-; SSE-NEXT: [[ADD:%.*]] = add i64 undef, undef
-; SSE-NEXT: store i64 [[ADD]], ptr undef, align 1
-; SSE-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 4
-; SSE-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> <i64 poison, i64 undef>, i64 [[TMP0]], i32 0
-; SSE-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[TMP1]], splat (i64 2)
-; SSE-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], splat (i64 20)
-; SSE-NEXT: [[TMP4:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> poison, <2 x i32> <i32 1, i32 0>
-; SSE-NEXT: [[TMP5:%.*]] = add nuw nsw <2 x i64> [[TMP4]], zeroinitializer
-; SSE-NEXT: store <2 x i64> [[TMP5]], ptr undef, align 1
-; SSE-NEXT: [[TMP6:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> poison, <2 x i32> <i32 1, i32 poison>
-; SSE-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[ADD]], i32 1
-; SSE-NEXT: [[TMP8:%.*]] = shl <2 x i64> [[TMP7]], splat (i64 2)
-; SSE-NEXT: [[TMP9:%.*]] = and <2 x i64> [[TMP8]], splat (i64 20)
-; SSE-NEXT: [[TMP10:%.*]] = lshr <2 x i64> [[TMP5]], splat (i64 6)
-; SSE-NEXT: [[TMP11:%.*]] = add nuw nsw <2 x i64> [[TMP9]], [[TMP10]]
-; SSE-NEXT: store <2 x i64> [[TMP11]], ptr [[ARRAYIDX2_2]], align 1
+; SSE-NEXT: [[TMP0:%.*]] = load i64, ptr [[P:%.*]], align 1
+; SSE-NEXT: [[AND:%.*]] = shl i64 [[TMP0]], 2
+; SSE-NEXT: [[SHL:%.*]] = and i64 [[AND]], 20
+; SSE-NEXT: [[ADD:%.*]] = add i64 [[C:%.*]], [[C]]
+; SSE-NEXT: store i64 [[ADD]], ptr [[P]], align 1
+; SSE-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 5
+; SSE-NEXT: [[AND_1:%.*]] = shl i64 [[C]], 2
+; SSE-NEXT: [[SHL_1:%.*]] = and i64 [[AND_1]], 20
+; SSE-NEXT: [[SHR_1:%.*]] = lshr i64 [[C]], 6
+; SSE-NEXT: [[ADD_1:%.*]] = add nuw nsw i64 [[SHL]], [[SHR_1]]
+; SSE-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 4
+; SSE-NEXT: [[SHR_2:%.*]] = lshr i64 [[C]], 6
+; SSE-NEXT: [[ADD_2:%.*]] = add nuw nsw i64 [[SHL_1]], [[SHR_2]]
+; SSE-NEXT: [[AND_4:%.*]] = shl i64 [[ADD]], 2
+; SSE-NEXT: [[SHL_4:%.*]] = and i64 [[AND_4]], 20
+; SSE-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 1
+; SSE-NEXT: store i64 [[ADD_1]], ptr [[ARRAYIDX2_5]], align 1
+; SSE-NEXT: [[AND_5:%.*]] = shl nuw nsw i64 [[ADD_1]], 2
+; SSE-NEXT: [[SHL_5:%.*]] = and i64 [[AND_5]], 20
+; SSE-NEXT: [[SHR_5:%.*]] = lshr i64 [[ADD_1]], 6
+; SSE-NEXT: [[ADD_5:%.*]] = add nuw nsw i64 [[SHL_4]], [[SHR_5]]
+; SSE-NEXT: store i64 [[ADD_5]], ptr [[ARRAYIDX2_1]], align 1
+; SSE-NEXT: store i64 [[ADD_2]], ptr [[P]], align 1
+; SSE-NEXT: [[SHR_6:%.*]] = lshr i64 [[ADD_2]], 6
+; SSE-NEXT: [[ADD_6:%.*]] = add nuw nsw i64 [[SHL_5]], [[SHR_6]]
+; SSE-NEXT: store i64 [[ADD_6]], ptr [[ARRAYIDX2_2]], align 1
; SSE-NEXT: ret void
;
; AVX-LABEL: @pr35497(
; AVX-NEXT: entry:
-; AVX-NEXT: [[TMP0:%.*]] = load i64, ptr undef, align 1
-; AVX-NEXT: [[ADD:%.*]] = add i64 undef, undef
-; AVX-NEXT: store i64 [[ADD]], ptr undef, align 1
-; AVX-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 4
-; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> <i64 undef, i64 poison>, i64 [[TMP0]], i32 1
+; AVX-NEXT: [[TMP0:%.*]] = load i64, ptr [[P:%.*]], align 1
+; AVX-NEXT: [[TMP5:%.*]] = insertelement <2 x i64> poison, i64 [[C:%.*]], i32 0
+; AVX-NEXT: [[TMP11:%.*]] = shufflevector <2 x i64> [[TMP5]], <2 x i64> poison, <2 x i32> zeroinitializer
+; AVX-NEXT: [[TMP13:%.*]] = lshr <2 x i64> [[TMP11]], splat (i64 6)
+; AVX-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 4
+; AVX-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds [0 x i64], ptr [[P]], i64 0, i64 1
+; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i64> [[TMP11]], i64 [[TMP0]], i32 1
; AVX-NEXT: [[TMP2:%.*]] = shl <2 x i64> [[TMP1]], splat (i64 2)
; AVX-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], splat (i64 20)
-; AVX-NEXT: [[TMP4:%.*]] = add nuw nsw <2 x i64> [[TMP3]], zeroinitializer
-; AVX-NEXT: store <2 x i64> [[TMP4]], ptr undef, align 1
-; AVX-NEXT: [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP4]], <2 x i64> poison, <2 x i32> <i32 1, i32 poison>
-; AVX-NEXT: [[TMP6:%.*]] = insertelement <2 x i64> [[TMP5]], i64 [[ADD]], i32 1
+; AVX-NEXT: [[TMP14:%.*]] = shufflevector <2 x i64> [[TMP3]], <2 x i64> [[TMP1]], <2 x i32> <i32 1, i32 2>
+; AVX-NEXT: [[TMP16:%.*]] = shufflevector <2 x i64> [[TMP13]], <2 x i64> [[TMP14]], <2 x i32> <i32 1, i32 3>
+; AVX-NEXT: [[TMP6:%.*]] = add <2 x i64> [[TMP14]], [[TMP16]]
+; AVX-NEXT: [[TMP17:%.*]] = extractelement <2 x i64> [[TMP6]], i32 1
+; AVX-NEXT: store i64 [[TMP17]], ptr [[P]], align 1
+; AVX-NEXT: [[TMP4:%.*]] = add nuw nsw <2 x i64> [[TMP3]], [[TMP13]]
+; AVX-NEXT: [[TMP12:%.*]] = extractelement <2 x i64> [[TMP6]], i32 0
+; AVX-NEXT: store i64 [[TMP12]], ptr [[ARRAYIDX2_5]], align 1
; AVX-NEXT: [[TMP7:%.*]] = shl <2 x i64> [[TMP6]], splat (i64 2)
; AVX-NEXT: [[TMP8:%.*]] = and <2 x i64> [[TMP7]], splat (i64 20)
+; AVX-NEXT: [[TMP15:%.*]] = extractelement <2 x i64> [[TMP4]], i32 0
+; AVX-NEXT: store i64 [[TMP15]], ptr [[P]], align 1
; AVX-NEXT: [[TMP9:%.*]] = lshr <2 x i64> [[TMP4]], splat (i64 6)
; AVX-NEXT: [[TMP10:%.*]] = add nuw nsw <2 x i64> [[TMP8]], [[TMP9]]
; AVX-NEXT: store <2 x i64> [[TMP10]], ptr [[ARRAYIDX2_2]], align 1
; AVX-NEXT: ret void
;
entry:
- %0 = load i64, ptr undef, align 1
+ %0 = load i64, ptr %p, align 1
%and = shl i64 %0, 2
%shl = and i64 %and, 20
- %add = add i64 undef, undef
- store i64 %add, ptr undef, align 1
- %arrayidx2.1 = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 5
- %and.1 = shl i64 undef, 2
+ %add = add i64 %c, %c
+ store i64 %add, ptr %p, align 1
+ %arrayidx2.1 = getelementptr inbounds [0 x i64], ptr %p, i64 0, i64 5
+ %and.1 = shl i64 %c, 2
%shl.1 = and i64 %and.1, 20
- %shr.1 = lshr i64 undef, 6
+ %shr.1 = lshr i64 %c, 6
%add.1 = add nuw nsw i64 %shl, %shr.1
- %arrayidx2.2 = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 4
- %shr.2 = lshr i64 undef, 6
+ %arrayidx2.2 = getelementptr inbounds [0 x i64], ptr %p, i64 0, i64 4
+ %shr.2 = lshr i64 %c, 6
%add.2 = add nuw nsw i64 %shl.1, %shr.2
%and.4 = shl i64 %add, 2
%shl.4 = and i64 %and.4, 20
- %arrayidx2.5 = getelementptr inbounds [0 x i64], ptr undef, i64 0, i64 1
+ %arrayidx2.5 = getelementptr inbounds [0 x i64], ptr %p, i64 0, i64 1
store i64 %add.1, ptr %arrayidx2.5, align 1
%and.5 = shl nuw nsw i64 %add.1, 2
%shl.5 = and i64 %and.5, 20
%shr.5 = lshr i64 %add.1, 6
%add.5 = add nuw nsw i64 %shl.4, %shr.5
store i64 %add.5, ptr %arrayidx2.1, align 1
- store i64 %add.2, ptr undef, align 1
+ store i64 %add.2, ptr %p, align 1
%shr.6 = lshr i64 %add.2, 6
%add.6 = add nuw nsw i64 %shl.5, %shr.6
store i64 %add.6, ptr %arrayidx2.2, align 1
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
index 1922e935..4527929 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-buildvector.ll
@@ -10,17 +10,65 @@ declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32 immarg
define void @test(ptr nocapture readonly %arg, ptr nocapture readonly %arg1, ptr nocapture %arg2) {
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <8 x ptr> poison, ptr [[ARG:%.*]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x ptr> [[TMP0]], <8 x ptr> poison, <8 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr double, <8 x ptr> [[TMP1]], <8 x i64> <i64 1, i64 3, i64 5, i64 7, i64 9, i64 11, i64 13, i64 15>
-; CHECK-NEXT: [[GEP2_0:%.*]] = getelementptr inbounds double, ptr [[ARG1:%.*]], i64 16
-; CHECK-NEXT: [[TMP3:%.*]] = call <8 x double> @llvm.masked.gather.v8f64.v8p0(<8 x ptr> [[TMP2]], i32 8, <8 x i1> splat (i1 true), <8 x double> poison)
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x double>, ptr [[GEP2_0]], align 8
-; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <8 x double> [[TMP4]], [[TMP3]]
-; CHECK-NEXT: [[TMP6:%.*]] = load <8 x double>, ptr [[ARG1]], align 8
-; CHECK-NEXT: [[TMP7:%.*]] = fmul fast <8 x double> [[TMP6]], [[TMP3]]
-; CHECK-NEXT: [[TMP8:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double 0.000000e+00, <8 x double> [[TMP7]])
-; CHECK-NEXT: [[TMP9:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double 0.000000e+00, <8 x double> [[TMP5]])
+; CHECK-NEXT: [[GEP1_0:%.*]] = getelementptr inbounds double, ptr [[ARG:%.*]], i64 1
+; CHECK-NEXT: [[LD1_0:%.*]] = load double, ptr [[GEP1_0]], align 8
+; CHECK-NEXT: [[LD0_0:%.*]] = load double, ptr [[ARG1:%.*]], align 8
+; CHECK-NEXT: [[MUL1_0:%.*]] = fmul fast double [[LD0_0]], [[LD1_0]]
+; CHECK-NEXT: [[GEP2_0:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 16
+; CHECK-NEXT: [[LD2_0:%.*]] = load double, ptr [[GEP2_0]], align 8
+; CHECK-NEXT: [[MUL2_0:%.*]] = fmul fast double [[LD2_0]], [[LD1_0]]
+; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 3
+; CHECK-NEXT: [[LD1_1:%.*]] = load double, ptr [[GEP1_1]], align 8
+; CHECK-NEXT: [[GEP0_1:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 1
+; CHECK-NEXT: [[LD0_1:%.*]] = load double, ptr [[GEP0_1]], align 8
+; CHECK-NEXT: [[MUL1_1:%.*]] = fmul fast double [[LD0_1]], [[LD1_1]]
+; CHECK-NEXT: [[RDX1_0:%.*]] = fadd fast double [[MUL1_0]], [[MUL1_1]]
+; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 17
+; CHECK-NEXT: [[LD2_1:%.*]] = load double, ptr [[GEP2_1]], align 8
+; CHECK-NEXT: [[MUL2_1:%.*]] = fmul fast double [[LD2_1]], [[LD1_1]]
+; CHECK-NEXT: [[RDX2_0:%.*]] = fadd fast double [[MUL2_0]], [[MUL2_1]]
+; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 5
+; CHECK-NEXT: [[LD1_2:%.*]] = load double, ptr [[GEP1_2]], align 8
+; CHECK-NEXT: [[GEP0_2:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 2
+; CHECK-NEXT: [[GEP2_2:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 18
+; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 7
+; CHECK-NEXT: [[LD1_3:%.*]] = load double, ptr [[GEP1_3]], align 8
+; CHECK-NEXT: [[GEP1_4:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 9
+; CHECK-NEXT: [[LD1_4:%.*]] = load double, ptr [[GEP1_4]], align 8
+; CHECK-NEXT: [[GEP1_5:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 11
+; CHECK-NEXT: [[LD1_5:%.*]] = load double, ptr [[GEP1_5]], align 8
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr [[GEP0_2]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x double> poison, double [[LD1_2]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x double> [[TMP1]], double [[LD1_3]], i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x double> [[TMP2]], double [[LD1_4]], i32 2
+; CHECK-NEXT: [[TMP4:%.*]] = insertelement <4 x double> [[TMP3]], double [[LD1_5]], i32 3
+; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <4 x double> [[TMP0]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x double>, ptr [[GEP2_2]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = fmul fast <4 x double> [[TMP6]], [[TMP4]]
+; CHECK-NEXT: [[GEP1_6:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 13
+; CHECK-NEXT: [[LD1_6:%.*]] = load double, ptr [[GEP1_6]], align 8
+; CHECK-NEXT: [[GEP0_6:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 6
+; CHECK-NEXT: [[LD0_6:%.*]] = load double, ptr [[GEP0_6]], align 8
+; CHECK-NEXT: [[MUL1_6:%.*]] = fmul fast double [[LD0_6]], [[LD1_6]]
+; CHECK-NEXT: [[GEP2_6:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 22
+; CHECK-NEXT: [[LD2_6:%.*]] = load double, ptr [[GEP2_6]], align 8
+; CHECK-NEXT: [[MUL2_6:%.*]] = fmul fast double [[LD2_6]], [[LD1_6]]
+; CHECK-NEXT: [[GEP1_7:%.*]] = getelementptr inbounds double, ptr [[ARG]], i64 15
+; CHECK-NEXT: [[LD1_7:%.*]] = load double, ptr [[GEP1_7]], align 8
+; CHECK-NEXT: [[GEP0_7:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 7
+; CHECK-NEXT: [[LD0_7:%.*]] = load double, ptr [[GEP0_7]], align 8
+; CHECK-NEXT: [[MUL1_7:%.*]] = fmul fast double [[LD0_7]], [[LD1_7]]
+; CHECK-NEXT: [[TMP10:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP5]])
+; CHECK-NEXT: [[OP_RDX3:%.*]] = fadd fast double [[TMP10]], [[MUL1_6]]
+; CHECK-NEXT: [[OP_RDX4:%.*]] = fadd fast double [[MUL1_7]], [[RDX1_0]]
+; CHECK-NEXT: [[TMP8:%.*]] = fadd fast double [[OP_RDX3]], [[OP_RDX4]]
+; CHECK-NEXT: [[GEP2_7:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 23
+; CHECK-NEXT: [[LD2_7:%.*]] = load double, ptr [[GEP2_7]], align 8
+; CHECK-NEXT: [[MUL2_7:%.*]] = fmul fast double [[LD2_7]], [[LD1_7]]
+; CHECK-NEXT: [[TMP11:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP7]])
+; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast double [[TMP11]], [[MUL2_6]]
+; CHECK-NEXT: [[OP_RDX1:%.*]] = fadd fast double [[MUL2_7]], [[RDX2_0]]
+; CHECK-NEXT: [[TMP9:%.*]] = fadd fast double [[OP_RDX]], [[OP_RDX1]]
; CHECK-NEXT: [[I142:%.*]] = insertelement <2 x double> poison, double [[TMP8]], i64 0
; CHECK-NEXT: [[I143:%.*]] = insertelement <2 x double> [[I142]], double [[TMP9]], i64 1
; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds double, ptr [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll
index f0272d5..33c281d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/redux-feed-insertelement.ll
@@ -6,9 +6,25 @@ declare void @llvm.masked.scatter.v2f64.v2p0(<2 x double>, <2 x ptr>, i32 immarg
define void @rdx_feeds_single_insert(<2 x double> %v, ptr nocapture readonly %arg, ptr nocapture readonly %arg1, ptr nocapture %arg2) {
; CHECK-LABEL: @rdx_feeds_single_insert(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <8 x double>, ptr [[ARG1:%.*]], align 8
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <8 x double> [[TMP0]], <double 1.000000e+01, double 1.100000e+01, double 1.200000e+01, double 1.300000e+01, double 1.400000e+01, double 1.500000e+01, double 1.600000e+01, double 1.700000e+01>
-; CHECK-NEXT: [[TMP2:%.*]] = call fast double @llvm.vector.reduce.fadd.v8f64(double 0.000000e+00, <8 x double> [[TMP1]])
+; CHECK-NEXT: [[LD0_0:%.*]] = load double, ptr [[ARG1:%.*]], align 8
+; CHECK-NEXT: [[MUL1_0:%.*]] = fmul fast double [[LD0_0]], 1.000000e+01
+; CHECK-NEXT: [[GEP0_1:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 1
+; CHECK-NEXT: [[LD0_1:%.*]] = load double, ptr [[GEP0_1]], align 8
+; CHECK-NEXT: [[MUL1_1:%.*]] = fmul fast double [[LD0_1]], 1.100000e+01
+; CHECK-NEXT: [[RDX1_0:%.*]] = fadd fast double [[MUL1_0]], [[MUL1_1]]
+; CHECK-NEXT: [[GEP0_2:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 2
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x double>, ptr [[GEP0_2]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <4 x double> [[TMP0]], <double 1.200000e+01, double 1.300000e+01, double 1.400000e+01, double 1.500000e+01>
+; CHECK-NEXT: [[GEP0_6:%.*]] = getelementptr inbounds double, ptr [[ARG1]], i64 6
+; CHECK-NEXT: [[TMP10:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, ptr [[GEP0_6]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <2 x double> [[TMP3]], <double 1.600000e+01, double 1.700000e+01>
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x double> poison, double [[TMP10]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x double> [[TMP5]], double [[RDX1_0]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <2 x double> [[TMP6]], [[TMP4]]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[TMP7]], i32 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x double> [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast double [[TMP8]], [[TMP9]]
; CHECK-NEXT: [[I:%.*]] = insertelement <2 x double> [[V:%.*]], double [[TMP2]], i64 1
; CHECK-NEXT: [[P:%.*]] = getelementptr inbounds double, ptr [[ARG2:%.*]], <2 x i64> <i64 0, i64 16>
; CHECK-NEXT: call void @llvm.masked.scatter.v2f64.v2p0(<2 x double> [[I]], <2 x ptr> [[P]], i32 8, <2 x i1> splat (i1 true))
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll b/llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll
index 8c9f8b5..359c24b 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/slp-fma-loss.ll
@@ -1,27 +1,39 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -passes=slp-vectorizer -S -mcpu=corei7 -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-2 < %s | FileCheck %s --check-prefixes=CHECK
-; RUN: opt -passes=slp-vectorizer -S -mcpu=bdver2 -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-2 < %s | FileCheck %s --check-prefixes=CHECK
-; RUN: opt -passes=slp-vectorizer -S -mcpu=core-avx2 -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-2 < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: opt -passes=slp-vectorizer -S -mcpu=corei7 -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-2 < %s | FileCheck %s --check-prefixes=SSE4
+; RUN: opt -passes=slp-vectorizer -S -mcpu=bdver2 -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-2 < %s | FileCheck %s --check-prefixes=AVX
+; RUN: opt -passes=slp-vectorizer -S -mcpu=core-avx2 -mtriple=x86_64-unknown-linux-gnu -slp-threshold=-2 < %s | FileCheck %s --check-prefixes=AVX
; This test checks a case in which a horizontal reduction of floating-point
; adds may look profitable, but is not, because it eliminates the generation
; of floating-point FMAs that would be more profitable.
-; FIXME: We generate a horizontal reduction today.
-
define void @hr() {
-; CHECK-LABEL: @hr(
-; CHECK-NEXT: br label [[LOOP:%.*]]
-; CHECK: loop:
-; CHECK-NEXT: [[PHI0:%.*]] = phi double [ 0.000000e+00, [[TMP0:%.*]] ], [ [[OP_RDX:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[CVT0:%.*]] = uitofp i16 0 to double
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x double> <double poison, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>, double [[CVT0]], i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <4 x double> zeroinitializer, [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP2]])
-; CHECK-NEXT: [[OP_RDX]] = fadd fast double [[TMP3]], [[PHI0]]
-; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[LOOP]]
-; CHECK: exit:
-; CHECK-NEXT: ret void
+; SSE4-LABEL: @hr(
+; SSE4-NEXT: br label [[LOOP:%.*]]
+; SSE4: loop:
+; SSE4-NEXT: [[PHI0:%.*]] = phi double [ 0.000000e+00, [[TMP0:%.*]] ], [ [[OP_RDX:%.*]], [[LOOP]] ]
+; SSE4-NEXT: [[CVT0:%.*]] = uitofp i16 0 to double
+; SSE4-NEXT: [[TMP1:%.*]] = insertelement <4 x double> <double poison, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>, double [[CVT0]], i32 0
+; SSE4-NEXT: [[TMP2:%.*]] = fmul fast <4 x double> zeroinitializer, [[TMP1]]
+; SSE4-NEXT: [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP2]])
+; SSE4-NEXT: [[OP_RDX]] = fadd fast double [[TMP3]], [[PHI0]]
+; SSE4-NEXT: br i1 true, label [[EXIT:%.*]], label [[LOOP]]
+; SSE4: exit:
+; SSE4-NEXT: ret void
+;
+; AVX-LABEL: @hr(
+; AVX-NEXT: br label [[LOOP:%.*]]
+; AVX: loop:
+; AVX-NEXT: [[PHI0:%.*]] = phi double [ 0.000000e+00, [[TMP0:%.*]] ], [ [[ADD3:%.*]], [[LOOP]] ]
+; AVX-NEXT: [[CVT0:%.*]] = uitofp i16 0 to double
+; AVX-NEXT: [[MUL0:%.*]] = fmul fast double 0.000000e+00, [[CVT0]]
+; AVX-NEXT: [[ADD0:%.*]] = fadd fast double [[MUL0]], [[PHI0]]
+; AVX-NEXT: [[ADD1:%.*]] = fadd fast double 0.000000e+00, [[ADD0]]
+; AVX-NEXT: [[ADD2:%.*]] = fadd fast double 0.000000e+00, [[ADD1]]
+; AVX-NEXT: [[ADD3]] = fadd fast double 0.000000e+00, [[ADD2]]
+; AVX-NEXT: br i1 true, label [[EXIT:%.*]], label [[LOOP]]
+; AVX: exit:
+; AVX-NEXT: ret void
;
br label %loop
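
A minimal, hypothetical IR sketch of the pattern at stake (not part of the
test): on an FMA target, a scalar fmul whose only use is a fadd can be
selected as a single fused multiply-add, an opportunity the horizontal
reduction forgoes by funnelling every add into one @llvm.vector.reduce.fadd
call. The split in the checks above reflects this: the FMA-capable CPUs
behind the AVX prefix (bdver2, core-avx2) now keep the scalar chain, while
corei7, which lacks FMA, still profits from the vector reduction.

define double @fma_candidate(double %a, double %b, double %acc) {
  ; fmul feeding a single fadd: ISel can fuse these into one FMA.
  %m = fmul fast double %a, %b
  %s = fadd fast double %m, %acc
  ret double %s
}
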
@@ -47,18 +59,27 @@ exit:
; may look profitable, but neither is, because this eliminates the generation
; of floating-point FMAs that would be more profitable.
-; FIXME: We generate a horizontal reduction today, and if that's disabled, we
-; still vectorize some of the multiplies.
-
define double @hr_or_mul() {
-; CHECK-LABEL: @hr_or_mul(
-; CHECK-NEXT: [[CVT0:%.*]] = uitofp i16 3 to double
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x double> poison, double [[CVT0]], i32 0
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP3:%.*]] = fmul fast <4 x double> <double 7.000000e+00, double -4.300000e+01, double 2.200000e-02, double 9.500000e+00>, [[TMP2]]
-; CHECK-NEXT: [[TMP4:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP3]])
-; CHECK-NEXT: [[OP_RDX:%.*]] = fadd fast double [[TMP4]], [[CVT0]]
-; CHECK-NEXT: ret double [[OP_RDX]]
+; SSE4-LABEL: @hr_or_mul(
+; SSE4-NEXT: [[CVT0:%.*]] = uitofp i16 3 to double
+; SSE4-NEXT: [[TMP1:%.*]] = insertelement <4 x double> poison, double [[CVT0]], i32 0
+; SSE4-NEXT: [[TMP2:%.*]] = shufflevector <4 x double> [[TMP1]], <4 x double> poison, <4 x i32> zeroinitializer
+; SSE4-NEXT: [[TMP3:%.*]] = fmul fast <4 x double> <double 7.000000e+00, double -4.300000e+01, double 2.200000e-02, double 9.500000e+00>, [[TMP2]]
+; SSE4-NEXT: [[TMP4:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double 0.000000e+00, <4 x double> [[TMP3]])
+; SSE4-NEXT: [[OP_RDX:%.*]] = fadd fast double [[TMP4]], [[CVT0]]
+; SSE4-NEXT: ret double [[OP_RDX]]
+;
+; AVX-LABEL: @hr_or_mul(
+; AVX-NEXT: [[CVT0:%.*]] = uitofp i16 3 to double
+; AVX-NEXT: [[MUL0:%.*]] = fmul fast double 7.000000e+00, [[CVT0]]
+; AVX-NEXT: [[ADD0:%.*]] = fadd fast double [[MUL0]], [[CVT0]]
+; AVX-NEXT: [[MUL1:%.*]] = fmul fast double -4.300000e+01, [[CVT0]]
+; AVX-NEXT: [[ADD1:%.*]] = fadd fast double [[MUL1]], [[ADD0]]
+; AVX-NEXT: [[MUL2:%.*]] = fmul fast double 2.200000e-02, [[CVT0]]
+; AVX-NEXT: [[ADD2:%.*]] = fadd fast double [[MUL2]], [[ADD1]]
+; AVX-NEXT: [[MUL3:%.*]] = fmul fast double 9.500000e+00, [[CVT0]]
+; AVX-NEXT: [[ADD3:%.*]] = fadd fast double [[MUL3]], [[ADD2]]
+; AVX-NEXT: ret double [[ADD3]]
;
%cvt0 = uitofp i16 3 to double
%mul0 = fmul fast double 7.000000e+00, %cvt0
diff --git a/llvm/test/Transforms/SLPVectorizer/extracts-with-undefs.ll b/llvm/test/Transforms/SLPVectorizer/extracts-with-undefs.ll
index a64075d..5fe02cb 100644
--- a/llvm/test/Transforms/SLPVectorizer/extracts-with-undefs.ll
+++ b/llvm/test/Transforms/SLPVectorizer/extracts-with-undefs.ll
@@ -1,32 +1,57 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %}
-; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu | FileCheck %s %}
+; RUN: %if x86-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X86 %}
+; RUN: %if aarch64-registered-target %{ opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefix=AARCH64 %}
define void @test() {
-; CHECK-LABEL: @test(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: br label [[BODY:%.*]]
-; CHECK: body:
-; CHECK-NEXT: [[PHI1:%.*]] = phi double [ 0.000000e+00, [[ENTRY:%.*]] ], [ 0.000000e+00, [[BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY]] ], [ zeroinitializer, [[BODY]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> <double poison, double 0.000000e+00>, double [[PHI1]], i32 0
-; CHECK-NEXT: [[TMP9:%.*]] = fmul fast <2 x double> <double 0.000000e+00, double undef>, [[TMP8]]
-; CHECK-NEXT: [[ADD8_I_I:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> [[TMP9]])
-; CHECK-NEXT: [[CMP42_I:%.*]] = fcmp fast ole double [[ADD8_I_I]], 0.000000e+00
-; CHECK-NEXT: br i1 false, label [[BODY]], label [[EXIT:%.*]]
-; CHECK: exit:
-; CHECK-NEXT: br i1 false, label [[IF_THEN135_I:%.*]], label [[IF_END209_I:%.*]]
-; CHECK: if.then135.i:
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp fast olt <2 x double> [[TMP0]], zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x i1> <i1 poison, i1 false>, <2 x i1> [[TMP1]], <2 x i32> <i32 2, i32 1>
-; CHECK-NEXT: [[TMP3:%.*]] = select <2 x i1> [[TMP2]], <2 x double> zeroinitializer, <2 x double> zeroinitializer
-; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <2 x double> zeroinitializer, [[TMP3]]
-; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <2 x double> [[TMP4]], zeroinitializer
-; CHECK-NEXT: [[TMP6:%.*]] = fadd fast <2 x double> [[TMP5]], zeroinitializer
-; CHECK-NEXT: br label [[IF_END209_I]]
-; CHECK: if.end209.i:
-; CHECK-NEXT: [[TMP7:%.*]] = phi <2 x double> [ [[TMP6]], [[IF_THEN135_I]] ], [ zeroinitializer, [[EXIT]] ]
-; CHECK-NEXT: ret void
+; X86-LABEL: @test(
+; X86-NEXT: entry:
+; X86-NEXT: br label [[BODY:%.*]]
+; X86: body:
+; X86-NEXT: [[PHI1:%.*]] = phi double [ 0.000000e+00, [[ENTRY:%.*]] ], [ 0.000000e+00, [[BODY]] ]
+; X86-NEXT: [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY]] ], [ zeroinitializer, [[BODY]] ]
+; X86-NEXT: [[TMP1:%.*]] = insertelement <2 x double> <double poison, double 0.000000e+00>, double [[PHI1]], i32 0
+; X86-NEXT: [[TMP2:%.*]] = fmul fast <2 x double> <double 0.000000e+00, double undef>, [[TMP1]]
+; X86-NEXT: [[TMP3:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double 0.000000e+00, <2 x double> [[TMP2]])
+; X86-NEXT: [[CMP42_I:%.*]] = fcmp fast ole double [[TMP3]], 0.000000e+00
+; X86-NEXT: br i1 false, label [[BODY]], label [[EXIT:%.*]]
+; X86: exit:
+; X86-NEXT: br i1 false, label [[IF_THEN135_I:%.*]], label [[IF_END209_I:%.*]]
+; X86: if.then135.i:
+; X86-NEXT: [[TMP4:%.*]] = fcmp fast olt <2 x double> [[TMP0]], zeroinitializer
+; X86-NEXT: [[TMP5:%.*]] = shufflevector <2 x i1> <i1 poison, i1 false>, <2 x i1> [[TMP4]], <2 x i32> <i32 2, i32 1>
+; X86-NEXT: [[TMP6:%.*]] = select <2 x i1> [[TMP5]], <2 x double> zeroinitializer, <2 x double> zeroinitializer
+; X86-NEXT: [[TMP7:%.*]] = fmul fast <2 x double> zeroinitializer, [[TMP6]]
+; X86-NEXT: [[TMP8:%.*]] = fmul fast <2 x double> [[TMP7]], zeroinitializer
+; X86-NEXT: [[TMP9:%.*]] = fadd fast <2 x double> [[TMP8]], zeroinitializer
+; X86-NEXT: br label [[IF_END209_I]]
+; X86: if.end209.i:
+; X86-NEXT: [[TMP10:%.*]] = phi <2 x double> [ [[TMP9]], [[IF_THEN135_I]] ], [ zeroinitializer, [[EXIT]] ]
+; X86-NEXT: ret void
+;
+; AARCH64-LABEL: @test(
+; AARCH64-NEXT: entry:
+; AARCH64-NEXT: br label [[BODY:%.*]]
+; AARCH64: body:
+; AARCH64-NEXT: [[PHI1:%.*]] = phi double [ 0.000000e+00, [[ENTRY:%.*]] ], [ 0.000000e+00, [[BODY]] ]
+; AARCH64-NEXT: [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY]] ], [ zeroinitializer, [[BODY]] ]
+; AARCH64-NEXT: [[MUL_I478_I:%.*]] = fmul fast double [[PHI1]], 0.000000e+00
+; AARCH64-NEXT: [[MUL7_I485_I:%.*]] = fmul fast double undef, 0.000000e+00
+; AARCH64-NEXT: [[ADD8_I_I:%.*]] = fadd fast double [[MUL_I478_I]], [[MUL7_I485_I]]
+; AARCH64-NEXT: [[CMP42_I:%.*]] = fcmp fast ole double [[ADD8_I_I]], 0.000000e+00
+; AARCH64-NEXT: br i1 false, label [[BODY]], label [[EXIT:%.*]]
+; AARCH64: exit:
+; AARCH64-NEXT: br i1 false, label [[IF_THEN135_I:%.*]], label [[IF_END209_I:%.*]]
+; AARCH64: if.then135.i:
+; AARCH64-NEXT: [[TMP1:%.*]] = fcmp fast olt <2 x double> [[TMP0]], zeroinitializer
+; AARCH64-NEXT: [[TMP2:%.*]] = shufflevector <2 x i1> <i1 poison, i1 false>, <2 x i1> [[TMP1]], <2 x i32> <i32 2, i32 1>
+; AARCH64-NEXT: [[TMP3:%.*]] = select <2 x i1> [[TMP2]], <2 x double> zeroinitializer, <2 x double> zeroinitializer
+; AARCH64-NEXT: [[TMP4:%.*]] = fmul fast <2 x double> zeroinitializer, [[TMP3]]
+; AARCH64-NEXT: [[TMP5:%.*]] = fmul fast <2 x double> [[TMP4]], zeroinitializer
+; AARCH64-NEXT: [[TMP6:%.*]] = fadd fast <2 x double> [[TMP5]], zeroinitializer
+; AARCH64-NEXT: br label [[IF_END209_I]]
+; AARCH64: if.end209.i:
+; AARCH64-NEXT: [[TMP7:%.*]] = phi <2 x double> [ [[TMP6]], [[IF_THEN135_I]] ], [ zeroinitializer, [[EXIT]] ]
+; AARCH64-NEXT: ret void
;
entry:
br label %body
diff --git a/llvm/test/Transforms/SLPVectorizer/insertelement-postpone.ll b/llvm/test/Transforms/SLPVectorizer/insertelement-postpone.ll
index 1e4b598..b5d74f0b 100644
--- a/llvm/test/Transforms/SLPVectorizer/insertelement-postpone.ll
+++ b/llvm/test/Transforms/SLPVectorizer/insertelement-postpone.ll
@@ -1,24 +1,45 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple x86_64-unknown-linux-gnu < %s | FileCheck %s %}
-; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple aarch64-unknown-linux-gnu < %s | FileCheck %s %}
+; RUN: %if x86-registered-target %{ opt -S -passes=slp-vectorizer -mtriple x86_64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=X86 %}
+; RUN: %if aarch64-registered-target %{ opt -S -passes=slp-vectorizer -mtriple aarch64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=AARCH64 %}
define <4 x double> @test(ptr %p2, double %i1754, double %i1781, double %i1778) {
-; CHECK-LABEL: @test(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[I1771:%.*]] = getelementptr inbounds double, ptr [[P2:%.*]], i64 54
-; CHECK-NEXT: [[I1772:%.*]] = load double, ptr [[I1771]], align 8
-; CHECK-NEXT: [[I1795:%.*]] = getelementptr inbounds double, ptr [[P2]], i64 55
-; CHECK-NEXT: [[I1796:%.*]] = load double, ptr [[I1795]], align 8
-; CHECK-NEXT: [[I1797:%.*]] = fmul fast double [[I1796]], [[I1781:%.*]]
-; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x double> poison, double [[I1754:%.*]], i32 0
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x double> [[TMP0]], double [[I1778:%.*]], i32 1
-; CHECK-NEXT: [[TMP2:%.*]] = insertelement <4 x double> [[TMP1]], double [[I1781]], i32 2
-; CHECK-NEXT: [[TMP3:%.*]] = insertelement <4 x double> [[TMP2]], double [[I1772]], i32 3
-; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP5:%.*]] = fmul fast <4 x double> [[TMP3]], [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x double> <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double poison>, double [[I1797]], i32 3
-; CHECK-NEXT: [[TMP7:%.*]] = fadd fast <4 x double> [[TMP5]], [[TMP6]]
-; CHECK-NEXT: ret <4 x double> [[TMP7]]
+; X86-LABEL: @test(
+; X86-NEXT: entry:
+; X86-NEXT: [[I1771:%.*]] = getelementptr inbounds double, ptr [[P2:%.*]], i64 54
+; X86-NEXT: [[I1772:%.*]] = load double, ptr [[I1771]], align 8
+; X86-NEXT: [[I1795:%.*]] = getelementptr inbounds double, ptr [[P2]], i64 55
+; X86-NEXT: [[I1796:%.*]] = load double, ptr [[I1795]], align 8
+; X86-NEXT: [[I1797:%.*]] = fmul fast double [[I1796]], [[I1781:%.*]]
+; X86-NEXT: [[TMP0:%.*]] = insertelement <4 x double> poison, double [[I1754:%.*]], i32 0
+; X86-NEXT: [[TMP1:%.*]] = insertelement <4 x double> [[TMP0]], double [[I1778:%.*]], i32 1
+; X86-NEXT: [[TMP2:%.*]] = insertelement <4 x double> [[TMP1]], double [[I1781]], i32 2
+; X86-NEXT: [[TMP3:%.*]] = insertelement <4 x double> [[TMP2]], double [[I1772]], i32 3
+; X86-NEXT: [[TMP4:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> poison, <4 x i32> zeroinitializer
+; X86-NEXT: [[TMP5:%.*]] = fmul fast <4 x double> [[TMP3]], [[TMP4]]
+; X86-NEXT: [[TMP6:%.*]] = insertelement <4 x double> <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double poison>, double [[I1797]], i32 3
+; X86-NEXT: [[TMP7:%.*]] = fadd fast <4 x double> [[TMP5]], [[TMP6]]
+; X86-NEXT: ret <4 x double> [[TMP7]]
+;
+; AARCH64-LABEL: @test(
+; AARCH64-NEXT: entry:
+; AARCH64-NEXT: [[I1771:%.*]] = getelementptr inbounds double, ptr [[P2:%.*]], i64 54
+; AARCH64-NEXT: [[I1772:%.*]] = load double, ptr [[I1771]], align 8
+; AARCH64-NEXT: [[I1773:%.*]] = fmul fast double [[I1772]], [[I1754:%.*]]
+; AARCH64-NEXT: [[I1782:%.*]] = fmul fast double [[I1754]], [[I1754]]
+; AARCH64-NEXT: [[I1783:%.*]] = fadd fast double [[I1782]], 1.000000e+00
+; AARCH64-NEXT: [[I1787:%.*]] = fmul fast double [[I1778:%.*]], [[I1754]]
+; AARCH64-NEXT: [[I1788:%.*]] = fadd fast double [[I1787]], 1.000000e+00
+; AARCH64-NEXT: [[I1792:%.*]] = fmul fast double [[I1754]], [[I1781:%.*]]
+; AARCH64-NEXT: [[I1793:%.*]] = fadd fast double [[I1792]], 1.000000e+00
+; AARCH64-NEXT: [[I1795:%.*]] = getelementptr inbounds double, ptr [[P2]], i64 55
+; AARCH64-NEXT: [[I1796:%.*]] = load double, ptr [[I1795]], align 8
+; AARCH64-NEXT: [[I1797:%.*]] = fmul fast double [[I1796]], [[I1781]]
+; AARCH64-NEXT: [[TMP4:%.*]] = fadd fast double [[I1773]], [[I1797]]
+; AARCH64-NEXT: [[I1976:%.*]] = insertelement <4 x double> zeroinitializer, double [[I1783]], i64 0
+; AARCH64-NEXT: [[I1982:%.*]] = insertelement <4 x double> [[I1976]], double [[I1788]], i64 1
+; AARCH64-NEXT: [[I1988:%.*]] = insertelement <4 x double> [[I1982]], double [[I1793]], i64 2
+; AARCH64-NEXT: [[I1994:%.*]] = insertelement <4 x double> [[I1988]], double [[TMP4]], i64 3
+; AARCH64-NEXT: ret <4 x double> [[I1994]]
;
entry:
%i1771 = getelementptr inbounds double, ptr %p2, i64 54
diff --git a/llvm/test/Transforms/Scalarizer/intrinsics.ll b/llvm/test/Transforms/Scalarizer/intrinsics.ll
index cee44ef..070c765 100644
--- a/llvm/test/Transforms/Scalarizer/intrinsics.ll
+++ b/llvm/test/Transforms/Scalarizer/intrinsics.ll
@@ -8,6 +8,7 @@ declare <2 x float> @llvm.sqrt.v2f32(<2 x float>)
declare <2 x float> @llvm.minnum.v2f32(<2 x float>, <2 x float>)
declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>)
declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>)
+declare <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float>, <2 x i32>)
; Ternary fp
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
@@ -32,6 +33,8 @@ declare <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float>)
; Unary fp operand, int return type
declare <2 x i32> @llvm.lrint.v2i32.v2f32(<2 x float>)
declare <2 x i32> @llvm.llrint.v2i32.v2f32(<2 x float>)
+declare <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float>)
+declare <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float>)
; Bool return type, overloaded on fp operand type
declare <2 x i1> @llvm.is.fpclass(<2 x float>, i32)
@@ -159,6 +162,22 @@ define <2 x float> @scalarize_powi_v2f32(<2 x float> %x, i32 %y) #0 {
ret <2 x float> %powi
}
+define <2 x float> @scalarize_ldexp_v2f32(<2 x float> %x, <2 x i32> %y) #0 {
+; CHECK-LABEL: @scalarize_ldexp_v2f32(
+; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
+; CHECK-NEXT: [[Y:%.*]] = extractelement <2 x i32> [[Y1:%.*]], i64 0
+; CHECK-NEXT: [[POWI_I0:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I0]], i32 [[Y]])
+; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
+; CHECK-NEXT: [[Y_I1:%.*]] = extractelement <2 x i32> [[Y1]], i64 1
+; CHECK-NEXT: [[POWI_I1:%.*]] = call float @llvm.ldexp.f32.i32(float [[X_I1]], i32 [[Y_I1]])
+; CHECK-NEXT: [[POWI_UPTO0:%.*]] = insertelement <2 x float> poison, float [[POWI_I0]], i64 0
+; CHECK-NEXT: [[POWI:%.*]] = insertelement <2 x float> [[POWI_UPTO0]], float [[POWI_I1]], i64 1
+; CHECK-NEXT: ret <2 x float> [[POWI]]
+;
+ %powi = call <2 x float> @llvm.ldexp.v2f32.v2i32(<2 x float> %x, <2 x i32> %y)
+ ret <2 x float> %powi
+}
+
define <2 x i32> @scalarize_smul_fix_sat_v2i32(<2 x i32> %x) #0 {
; CHECK-LABEL: @scalarize_smul_fix_sat_v2i32(
; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x i32> [[X:%.*]], i64 0
@@ -243,6 +262,34 @@ define <2 x i32> @scalarize_llrint(<2 x float> %x) #0 {
ret <2 x i32> %rnd
}
+define <2 x i32> @scalarize_lround(<2 x float> %x) #0 {
+; CHECK-LABEL: @scalarize_lround(
+; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
+; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I0]])
+; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
+; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.lround.i32.f32(float [[X_I1]])
+; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0
+; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1
+; CHECK-NEXT: ret <2 x i32> [[RND]]
+;
+ %rnd = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> %x)
+ ret <2 x i32> %rnd
+}
+
+define <2 x i32> @scalarize_llround(<2 x float> %x) #0 {
+; CHECK-LABEL: @scalarize_llround(
+; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
+; CHECK-NEXT: [[RND_I0:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I0]])
+; CHECK-NEXT: [[X_I1:%.*]] = extractelement <2 x float> [[X]], i64 1
+; CHECK-NEXT: [[RND_I1:%.*]] = call i32 @llvm.llround.i32.f32(float [[X_I1]])
+; CHECK-NEXT: [[RND_UPTO0:%.*]] = insertelement <2 x i32> poison, i32 [[RND_I0]], i64 0
+; CHECK-NEXT: [[RND:%.*]] = insertelement <2 x i32> [[RND_UPTO0]], i32 [[RND_I1]], i64 1
+; CHECK-NEXT: ret <2 x i32> [[RND]]
+;
+ %rnd = call <2 x i32> @llvm.llround.v2i32.v2f32(<2 x float> %x)
+ ret <2 x i32> %rnd
+}
+
define <2 x i1> @scalarize_is_fpclass(<2 x float> %x) #0 {
; CHECK-LABEL: @scalarize_is_fpclass(
; CHECK-NEXT: [[X_I0:%.*]] = extractelement <2 x float> [[X:%.*]], i64 0
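
To try the new expansions by hand, a minimal sketch (hypothetical function
name; assumes the usual opt -passes=scalarizer -S invocation these tests
use) that the pass rewrites into two per-lane @llvm.lround.i32.f32 calls,
matching the CHECK pattern above:

define <2 x i32> @lround_demo(<2 x float> %x) {
  ; Expanded by the scalarizer into extractelement / scalar call /
  ; insertelement, one triple per lane.
  %r = call <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float> %x)
  ret <2 x i32> %r
}
declare <2 x i32> @llvm.lround.v2i32.v2f32(<2 x float>)
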
diff --git a/llvm/test/tools/llvm-objdump/MachO/bad-trie.test b/llvm/test/tools/llvm-objdump/MachO/bad-trie.test
index 8b29d30..e4d0ed5 100644
--- a/llvm/test/tools/llvm-objdump/MachO/bad-trie.test
+++ b/llvm/test/tools/llvm-objdump/MachO/bad-trie.test
@@ -11,7 +11,7 @@ RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-export-info-si
EXPORT_INFO_SIZE_TOO_BIG: macho-trie-export-info-size-too-big': truncated or malformed object (export info size: 0x1234 in export trie data at node: 0x33 too big and extends past end of trie data)
RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-children-count-byte 2>&1 | FileCheck --check-prefix CHILDREN_COUNT_BYTE %s
-CHILDREN_COUNT_BYTE: macho-trie-children-count-byte': truncated or malformed object (byte for count of childern in export trie data at node: 0x5 extends past end of trie data)
+CHILDREN_COUNT_BYTE: macho-trie-children-count-byte': truncated or malformed object (byte for count of children in export trie data at node: 0x5 extends past end of trie data)
RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-import-name-start 2>&1 | FileCheck --check-prefix IMPORT_NAME_START %s
IMPORT_NAME_START: macho-trie-import-name-start': truncated or malformed object (import name of re-export in export trie data at node: 0x33 starts past end of trie data)
@@ -25,8 +25,8 @@ EDGE_STRING_END: macho-trie-edge-string-end': truncated or malformed object (edg
RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-not-export-node 2>&1 | FileCheck --check-prefix NOT_EXPORT_NODE %s
NOT_EXPORT_NODE: macho-trie-not-export-node': truncated or malformed object (node is not an export node in export trie data at node: 0x5a)
-RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-node-loop 2>&1 | FileCheck --check-prefix LOOP_OF_CHILDERN %s
-LOOP_OF_CHILDERN: macho-trie-node-loop': truncated or malformed object (loop in childern in export trie data at node: 0x42 back to node: 0x5)
+RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-node-loop 2>&1 | FileCheck --check-prefix LOOP_OF_CHILDREN %s
+LOOP_OF_CHILDREN: macho-trie-node-loop': truncated or malformed object (loop in children in export trie data at node: 0x42 back to node: 0x5)
RUN: not llvm-objdump --macho --exports-trie %p/Inputs/macho-trie-bad-library-ordinal 2>&1 | FileCheck --check-prefix BAD_LIBRARY_ORDINAL %s
BAD_LIBRARY_ORDINAL: macho-trie-bad-library-ordinal': truncated or malformed object (bad library ordinal: 69 (max 3) in export trie data at node: 0x33)